chore: add context to the cache interface (#9565)

Author: Owen Rumney
Date: 2025-10-03 10:37:05 +01:00
Committed by: GitHub
Parent: 3dd0ebbb56
Commit: 719ea29d31
28 changed files with 198 additions and 184 deletions


@@ -1,6 +1,7 @@
package cachetest
import (
"context"
"errors"
"testing"
@@ -42,39 +43,39 @@ func NewErrorCache(opts ErrorCacheOptions) *ErrorCache {
}
}
func (c *ErrorCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []string, error) {
func (c *ErrorCache) MissingBlobs(ctx context.Context, artifactID string, blobIDs []string) (bool, []string, error) {
if c.opts.MissingBlobs {
return false, nil, errors.New("MissingBlobs failed")
}
return c.MemoryCache.MissingBlobs(artifactID, blobIDs)
return c.MemoryCache.MissingBlobs(ctx, artifactID, blobIDs)
}
func (c *ErrorCache) PutArtifact(artifactID string, artifactInfo types.ArtifactInfo) error {
func (c *ErrorCache) PutArtifact(ctx context.Context, artifactID string, artifactInfo types.ArtifactInfo) error {
if c.opts.PutArtifact {
return errors.New("PutArtifact failed")
}
return c.MemoryCache.PutArtifact(artifactID, artifactInfo)
return c.MemoryCache.PutArtifact(ctx, artifactID, artifactInfo)
}
func (c *ErrorCache) PutBlob(artifactID string, blobInfo types.BlobInfo) error {
func (c *ErrorCache) PutBlob(ctx context.Context, artifactID string, blobInfo types.BlobInfo) error {
if c.opts.PutBlob {
return errors.New("PutBlob failed")
}
return c.MemoryCache.PutBlob(artifactID, blobInfo)
return c.MemoryCache.PutBlob(ctx, artifactID, blobInfo)
}
func (c *ErrorCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
func (c *ErrorCache) GetArtifact(ctx context.Context, artifactID string) (types.ArtifactInfo, error) {
if c.opts.GetArtifact {
return types.ArtifactInfo{}, errors.New("GetArtifact failed")
}
return c.MemoryCache.GetArtifact(artifactID)
return c.MemoryCache.GetArtifact(ctx, artifactID)
}
func (c *ErrorCache) GetBlob(blobID string) (types.BlobInfo, error) {
func (c *ErrorCache) GetBlob(ctx context.Context, blobID string) (types.BlobInfo, error) {
if c.opts.GetBlob {
return types.BlobInfo{}, errors.New("GetBlob failed")
}
return c.MemoryCache.GetBlob(blobID)
return c.MemoryCache.GetBlob(ctx, blobID)
}
func NewCache(t *testing.T, setUpCache func(t *testing.T) cache.Cache) cache.Cache {
@@ -85,7 +86,7 @@ func NewCache(t *testing.T, setUpCache func(t *testing.T) cache.Cache) cache.Cac
}
func AssertArtifact(t *testing.T, c cache.Cache, wantArtifact WantArtifact) {
gotArtifact, err := c.GetArtifact(wantArtifact.ID)
gotArtifact, err := c.GetArtifact(t.Context(), wantArtifact.ID)
require.NoError(t, err, "artifact not found")
assert.Equal(t, wantArtifact.ArtifactInfo, gotArtifact, wantArtifact.ID)
}
@@ -100,7 +101,7 @@ func AssertBlobs(t *testing.T, c cache.Cache, wantBlobs []WantBlob) {
}
for _, want := range wantBlobs {
got, err := c.GetBlob(want.ID)
got, err := c.GetBlob(t.Context(), want.ID)
require.NoError(t, err, "blob not found")
for i := range got.Misconfigurations {

pkg/cache/cache.go vendored (16 changed lines)

@@ -1,6 +1,8 @@
package cache
import (
"context"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
@@ -21,29 +23,29 @@ type Cache interface {
// ArtifactCache uses local or remote cache
type ArtifactCache interface {
// MissingBlobs returns missing blob IDs such as layer IDs in cache
MissingBlobs(artifactID string, blobIDs []string) (missingArtifact bool, missingBlobIDs []string, err error)
MissingBlobs(ctx context.Context, artifactID string, blobIDs []string) (missingArtifact bool, missingBlobIDs []string, err error)
// PutArtifact stores artifact information such as image metadata in cache
PutArtifact(artifactID string, artifactInfo types.ArtifactInfo) (err error)
PutArtifact(ctx context.Context, artifactID string, artifactInfo types.ArtifactInfo) (err error)
// PutBlob stores blob information such as layer information in local cache
PutBlob(blobID string, blobInfo types.BlobInfo) (err error)
PutBlob(ctx context.Context, blobID string, blobInfo types.BlobInfo) (err error)
// DeleteBlobs removes blobs by IDs
DeleteBlobs(blobIDs []string) error
DeleteBlobs(ctx context.Context, blobIDs []string) error
}
// LocalArtifactCache always uses local cache
type LocalArtifactCache interface {
// GetArtifact gets artifact information such as image metadata from local cache
GetArtifact(artifactID string) (artifactInfo types.ArtifactInfo, err error)
GetArtifact(ctx context.Context, artifactID string) (artifactInfo types.ArtifactInfo, err error)
// GetBlob gets blob information such as layer data from local cache
GetBlob(blobID string) (blobInfo types.BlobInfo, err error)
GetBlob(ctx context.Context, blobID string) (blobInfo types.BlobInfo, err error)
// Close closes the local database
Close() (err error)
// Clear deletes the local database
Clear() (err error)
Clear(ctx context.Context) (err error)
}
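
For context, here is a minimal caller-side sketch of the updated interface, using the in-memory implementation changed in this commit. The artifact and blob IDs are placeholders, and the import paths follow the package layout shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aquasecurity/trivy/pkg/cache"
	"github.com/aquasecurity/trivy/pkg/fanal/types"
)

func main() {
	// The caller now owns the context; implementations no longer create
	// their own via context.TODO() or context.Background().
	ctx := context.Background()

	c := cache.NewMemoryCache()

	// Placeholder IDs for illustration only.
	artifactID := "sha256:artifact"
	blobID := "sha256:blob"

	if err := c.PutArtifact(ctx, artifactID, types.ArtifactInfo{SchemaVersion: 1}); err != nil {
		log.Fatal(err)
	}
	if err := c.PutBlob(ctx, blobID, types.BlobInfo{SchemaVersion: 1}); err != nil {
		log.Fatal(err)
	}

	missingArtifact, missingBlobIDs, err := c.MissingBlobs(ctx, artifactID, []string{blobID, "sha256:other"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(missingArtifact, missingBlobIDs)

	// DeleteBlobs and Clear take the context as well.
	_ = c.DeleteBlobs(ctx, []string{blobID})
	_ = c.Clear(ctx)
}
```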

pkg/cache/fs.go vendored (17 changed lines)

@@ -1,6 +1,7 @@
package cache
import (
"context"
"encoding/json"
"errors"
"os"
@@ -63,7 +64,7 @@ func NewFSCache(cacheDir string) (FSCache, error) {
}
// GetBlob gets blob information such as layer data from local cache
func (fs FSCache) GetBlob(blobID string) (types.BlobInfo, error) {
func (fs FSCache) GetBlob(_ context.Context, blobID string) (types.BlobInfo, error) {
var blobInfo types.BlobInfo
err := fs.db.View(func(tx *bolt.Tx) error {
var err error
@@ -91,7 +92,7 @@ func (fs FSCache) getBlob(blobBucket *bolt.Bucket, diffID string) (types.BlobInf
}
// PutBlob stores blob information such as layer information in local cache
func (fs FSCache) PutBlob(blobID string, blobInfo types.BlobInfo) error {
func (fs FSCache) PutBlob(_ context.Context, blobID string, blobInfo types.BlobInfo) error {
b, err := json.Marshal(blobInfo)
if err != nil {
return xerrors.Errorf("unable to marshal blob JSON (%s): %w", blobID, err)
@@ -111,7 +112,7 @@ func (fs FSCache) PutBlob(blobID string, blobInfo types.BlobInfo) error {
}
// GetArtifact gets artifact information such as image metadata from local cache
func (fs FSCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
func (fs FSCache) GetArtifact(_ context.Context, artifactID string) (types.ArtifactInfo, error) {
var blob []byte
err := fs.db.View(func(tx *bolt.Tx) error {
artifactBucket := tx.Bucket([]byte(artifactBucket))
@@ -130,7 +131,7 @@ func (fs FSCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
}
// DeleteBlobs removes blobs by IDs
func (fs FSCache) DeleteBlobs(blobIDs []string) error {
func (fs FSCache) DeleteBlobs(_ context.Context, blobIDs []string) error {
var errs error
err := fs.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(blobBucket))
@@ -148,7 +149,7 @@ func (fs FSCache) DeleteBlobs(blobIDs []string) error {
}
// PutArtifact stores artifact information such as image metadata in local cache
func (fs FSCache) PutArtifact(artifactID string, artifactInfo types.ArtifactInfo) (err error) {
func (fs FSCache) PutArtifact(_ context.Context, artifactID string, artifactInfo types.ArtifactInfo) (err error) {
b, err := json.Marshal(artifactInfo)
if err != nil {
return xerrors.Errorf("unable to marshal artifact JSON (%s): %w", artifactID, err)
@@ -169,7 +170,7 @@ func (fs FSCache) PutArtifact(artifactID string, artifactInfo types.ArtifactInfo
}
// MissingBlobs returns missing blob IDs such as layer IDs
func (fs FSCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []string, error) {
func (fs FSCache) MissingBlobs(ctx context.Context, artifactID string, blobIDs []string) (bool, []string, error) {
var missingArtifact bool
var missingBlobIDs []string
err := fs.db.View(func(tx *bolt.Tx) error {
@@ -192,7 +193,7 @@ func (fs FSCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []str
}
// get artifact info
artifactInfo, err := fs.GetArtifact(artifactID)
artifactInfo, err := fs.GetArtifact(ctx, artifactID)
if err != nil {
// error means cache missed artifact info
return true, missingBlobIDs, nil
@@ -212,7 +213,7 @@ func (fs FSCache) Close() error {
}
// Clear removes the database
func (fs FSCache) Clear() error {
func (fs FSCache) Clear(_ context.Context) error {
if err := fs.Close(); err != nil {
return err
}

pkg/cache/fs_test.go vendored (16 changed lines)

@@ -100,11 +100,11 @@ func TestFSCache_GetBlob(t *testing.T) {
fs, err := NewFSCache(tmpDir)
require.NoError(t, err)
defer func() {
_ = fs.Clear()
_ = fs.Clear(t.Context())
_ = fs.Close()
}()
got, err := fs.GetBlob(tt.args.layerID)
got, err := fs.GetBlob(t.Context(), tt.args.layerID)
assert.Equal(t, tt.wantErr, err != nil, err)
assert.Equal(t, tt.want, got)
})
@@ -276,7 +276,7 @@ func TestFSCache_PutBlob(t *testing.T) {
fs, err := NewFSCache(tmpDir)
require.NoError(t, err)
defer func() {
_ = fs.Clear()
_ = fs.Clear(t.Context())
_ = fs.Close()
}()
@@ -284,7 +284,7 @@ func TestFSCache_PutBlob(t *testing.T) {
require.NoError(t, fs.Close())
}
err = fs.PutBlob(tt.args.diffID, tt.args.layerInfo)
err = fs.PutBlob(t.Context(), tt.args.diffID, tt.args.layerInfo)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return
@@ -356,11 +356,11 @@ func TestFSCache_PutArtifact(t *testing.T) {
fs, err := NewFSCache(tmpDir)
require.NoError(t, err)
defer func() {
_ = fs.Clear()
_ = fs.Clear(t.Context())
_ = fs.Close()
}()
err = fs.PutArtifact(tt.args.imageID, tt.args.imageConfig)
err = fs.PutArtifact(t.Context(), tt.args.imageID, tt.args.imageConfig)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return
@@ -475,11 +475,11 @@ func TestFSCache_MissingBlobs(t *testing.T) {
fs, err := NewFSCache(tmpDir)
require.NoError(t, err)
defer func() {
_ = fs.Clear()
_ = fs.Clear(t.Context())
_ = fs.Close()
}()
gotMissingImage, gotMissingLayerIDs, err := fs.MissingBlobs(tt.args.imageID, tt.args.layerIDs)
gotMissingImage, gotMissingLayerIDs, err := fs.MissingBlobs(t.Context(), tt.args.imageID, tt.args.layerIDs)
if tt.wantErr != "" {
assert.ErrorContains(t, err, tt.wantErr, tt.name)
return

pkg/cache/memory.go vendored (19 changed lines)

@@ -1,6 +1,7 @@
package cache
import (
"context"
"sync"
"golang.org/x/xerrors"
@@ -20,19 +21,19 @@ func NewMemoryCache() *MemoryCache {
}
// PutArtifact stores the artifact information in the memory cache
func (c *MemoryCache) PutArtifact(artifactID string, artifactInfo types.ArtifactInfo) error {
func (c *MemoryCache) PutArtifact(_ context.Context, artifactID string, artifactInfo types.ArtifactInfo) error {
c.artifacts.Store(artifactID, artifactInfo)
return nil
}
// PutBlob stores the blob information in the memory cache
func (c *MemoryCache) PutBlob(blobID string, blobInfo types.BlobInfo) error {
func (c *MemoryCache) PutBlob(_ context.Context, blobID string, blobInfo types.BlobInfo) error {
c.blobs.Store(blobID, blobInfo)
return nil
}
// DeleteBlobs removes the specified blobs from the memory cache
func (c *MemoryCache) DeleteBlobs(blobIDs []string) error {
func (c *MemoryCache) DeleteBlobs(_ context.Context, blobIDs []string) error {
for _, blobID := range blobIDs {
c.blobs.Delete(blobID)
}
@@ -40,7 +41,7 @@ func (c *MemoryCache) DeleteBlobs(blobIDs []string) error {
}
// GetArtifact retrieves the artifact information from the memory cache
func (c *MemoryCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
func (c *MemoryCache) GetArtifact(_ context.Context, artifactID string) (types.ArtifactInfo, error) {
info, ok := c.artifacts.Load(artifactID)
if !ok {
return types.ArtifactInfo{}, xerrors.Errorf("artifact (%s) not found in memory cache", artifactID)
@@ -53,7 +54,7 @@ func (c *MemoryCache) GetArtifact(artifactID string) (types.ArtifactInfo, error)
}
// GetBlob retrieves the blob information from the memory cache
func (c *MemoryCache) GetBlob(blobID string) (types.BlobInfo, error) {
func (c *MemoryCache) GetBlob(_ context.Context, blobID string) (types.BlobInfo, error) {
info, ok := c.blobs.Load(blobID)
if !ok {
return types.BlobInfo{}, xerrors.Errorf("blob (%s) not found in memory cache", blobID)
@@ -66,16 +67,16 @@ func (c *MemoryCache) GetBlob(blobID string) (types.BlobInfo, error) {
}
// MissingBlobs determines the missing artifact and blob information in the memory cache
func (c *MemoryCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []string, error) {
func (c *MemoryCache) MissingBlobs(ctx context.Context, artifactID string, blobIDs []string) (bool, []string, error) {
var missingArtifact bool
var missingBlobIDs []string
if _, err := c.GetArtifact(artifactID); err != nil {
if _, err := c.GetArtifact(ctx, artifactID); err != nil {
missingArtifact = true
}
for _, blobID := range blobIDs {
if _, err := c.GetBlob(blobID); err != nil {
if _, err := c.GetBlob(ctx, blobID); err != nil {
missingBlobIDs = append(missingBlobIDs, blobID)
}
}
@@ -91,7 +92,7 @@ func (c *MemoryCache) Close() error {
}
// Clear clears the artifact and blob information from the memory cache
func (c *MemoryCache) Clear() error {
func (c *MemoryCache) Clear(_ context.Context) error {
c.artifacts = sync.Map{}
c.blobs = sync.Map{}
return nil


@@ -34,10 +34,10 @@ func TestMemoryCache_PutArtifact(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := cache.NewMemoryCache()
err := c.PutArtifact(tt.artifactID, tt.artifactInfo)
err := c.PutArtifact(t.Context(), tt.artifactID, tt.artifactInfo)
require.NoError(t, err)
got, err := c.GetArtifact(tt.artifactID)
got, err := c.GetArtifact(t.Context(), tt.artifactID)
require.NoError(t, err)
assert.Equal(t, tt.artifactInfo, got)
})
@@ -82,10 +82,10 @@ func TestMemoryCache_PutBlob(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := cache.NewMemoryCache()
err := c.PutBlob(tt.blobID, tt.blobInfo)
err := c.PutBlob(t.Context(), tt.blobID, tt.blobInfo)
require.NoError(t, err)
got, err := c.GetBlob(tt.blobID)
got, err := c.GetBlob(t.Context(), tt.blobID)
require.NoError(t, err)
assert.Equal(t, tt.blobInfo, got)
})
@@ -123,11 +123,11 @@ func TestMemoryCache_GetArtifact(t *testing.T) {
c := cache.NewMemoryCache()
if !tt.wantErr {
err := c.PutArtifact(tt.artifactID, tt.artifactInfo)
err := c.PutArtifact(t.Context(), tt.artifactID, tt.artifactInfo)
require.NoError(t, err)
}
got, err := c.GetArtifact(tt.artifactID)
got, err := c.GetArtifact(t.Context(), tt.artifactID)
if tt.wantErr {
require.ErrorContains(t, err, "not found in memory cache")
return
@@ -171,11 +171,11 @@ func TestMemoryCache_GetBlob(t *testing.T) {
c := cache.NewMemoryCache()
if !tt.wantErr {
err := c.PutBlob(tt.blobID, tt.blobInfo)
err := c.PutBlob(t.Context(), tt.blobID, tt.blobInfo)
require.NoError(t, err)
}
got, err := c.GetBlob(tt.blobID)
got, err := c.GetBlob(t.Context(), tt.blobID)
if tt.wantErr {
require.ErrorContains(t, err, "not found in memory cache")
return
@@ -260,16 +260,16 @@ func TestMemoryCache_MissingBlobs(t *testing.T) {
c := cache.NewMemoryCache()
if tt.putArtifact {
err := c.PutArtifact(tt.artifactID, types.ArtifactInfo{})
err := c.PutArtifact(t.Context(), tt.artifactID, types.ArtifactInfo{})
require.NoError(t, err)
}
for _, blobID := range tt.putBlobs {
err := c.PutBlob(blobID, types.BlobInfo{})
err := c.PutBlob(t.Context(), blobID, types.BlobInfo{})
require.NoError(t, err)
}
gotMissingArtifact, gotMissingBlobIDs, err := c.MissingBlobs(tt.artifactID, tt.blobIDs)
gotMissingArtifact, gotMissingBlobIDs, err := c.MissingBlobs(t.Context(), tt.artifactID, tt.blobIDs)
require.NoError(t, err)
assert.Equal(t, tt.wantMissingArtifact, gotMissingArtifact)
assert.Equal(t, tt.wantMissingBlobIDs, gotMissingBlobIDs)
@@ -304,16 +304,16 @@ func TestMemoryCache_DeleteBlobs(t *testing.T) {
// Put some blobs in the cache
for _, blobID := range tt.blobIDs {
err := c.PutBlob(blobID, types.BlobInfo{})
err := c.PutBlob(t.Context(), blobID, types.BlobInfo{})
require.NoError(t, err)
}
err := c.DeleteBlobs(tt.blobIDs)
err := c.DeleteBlobs(t.Context(), tt.blobIDs)
require.NoError(t, err)
// Check that the blobs are no longer in the cache
for _, blobID := range tt.blobIDs {
_, err := c.GetBlob(blobID)
_, err := c.GetBlob(t.Context(), blobID)
require.ErrorContains(t, err, "not found in memory cache")
}
})
@@ -337,19 +337,19 @@ func TestMemoryCache_Clear(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := cache.NewMemoryCache()
err := c.PutArtifact(tt.artifactID, types.ArtifactInfo{})
err := c.PutArtifact(t.Context(), tt.artifactID, types.ArtifactInfo{})
require.NoError(t, err)
err = c.PutBlob(tt.blobID, types.BlobInfo{})
err = c.PutBlob(t.Context(), tt.blobID, types.BlobInfo{})
require.NoError(t, err)
err = c.Clear()
err = c.Clear(t.Context())
require.NoError(t, err)
_, err = c.GetArtifact(tt.artifactID)
_, err = c.GetArtifact(t.Context(), tt.artifactID)
require.ErrorContains(t, err, "not found in memory cache")
_, err = c.GetBlob(tt.blobID)
_, err = c.GetBlob(t.Context(), tt.blobID)
require.ErrorContains(t, err, "not found in memory cache")
})
}
@@ -372,19 +372,19 @@ func TestMemoryCache_Close(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
c := cache.NewMemoryCache()
err := c.PutArtifact(tt.artifactID, types.ArtifactInfo{})
err := c.PutArtifact(t.Context(), tt.artifactID, types.ArtifactInfo{})
require.NoError(t, err)
err = c.PutBlob(tt.blobID, types.BlobInfo{})
err = c.PutBlob(t.Context(), tt.blobID, types.BlobInfo{})
require.NoError(t, err)
err = c.Close()
require.NoError(t, err)
_, err = c.GetArtifact(tt.artifactID)
_, err = c.GetArtifact(t.Context(), tt.artifactID)
require.ErrorContains(t, err, "not found in memory cache")
_, err = c.GetBlob(tt.blobID)
_, err = c.GetBlob(t.Context(), tt.blobID)
require.ErrorContains(t, err, "not found in memory cache")
})
}

pkg/cache/nop.go vendored (20 changed lines)

@@ -1,11 +1,19 @@
package cache
import "github.com/aquasecurity/trivy/pkg/fanal/types"
import (
"context"
"github.com/aquasecurity/trivy/pkg/fanal/types"
)
type NopCache struct{}
func NewNopCache() NopCache { return NopCache{} }
func (NopCache) GetArtifact(string) (types.ArtifactInfo, error) { return types.ArtifactInfo{}, nil }
func (NopCache) GetBlob(string) (types.BlobInfo, error) { return types.BlobInfo{}, nil }
func (NopCache) Close() error { return nil }
func (NopCache) Clear() error { return nil }
func NewNopCache() NopCache { return NopCache{} }
func (NopCache) GetArtifact(context.Context, string) (types.ArtifactInfo, error) {
return types.ArtifactInfo{}, nil
}
func (NopCache) GetBlob(context.Context, string) (types.BlobInfo, error) {
return types.BlobInfo{}, nil
}
func (NopCache) Close() error { return nil }
func (NopCache) Clear(context.Context) error { return nil }

pkg/cache/redis.go vendored (30 changed lines)

@@ -116,43 +116,43 @@ func NewRedisCache(backend, caCertPath, certPath, keyPath string, enableTLS bool
}, nil
}
func (c RedisCache) PutArtifact(artifactID string, artifactConfig types.ArtifactInfo) error {
func (c RedisCache) PutArtifact(ctx context.Context, artifactID string, artifactConfig types.ArtifactInfo) error {
key := fmt.Sprintf("%s::%s::%s", redisPrefix, artifactBucket, artifactID)
b, err := json.Marshal(artifactConfig)
if err != nil {
return xerrors.Errorf("failed to marshal artifact JSON: %w", err)
}
if err := c.client.Set(context.TODO(), key, string(b), c.expiration).Err(); err != nil {
if err := c.client.Set(ctx, key, string(b), c.expiration).Err(); err != nil {
return xerrors.Errorf("unable to store artifact information in Redis cache (%s): %w", artifactID, err)
}
return nil
}
func (c RedisCache) PutBlob(blobID string, blobInfo types.BlobInfo) error {
func (c RedisCache) PutBlob(ctx context.Context, blobID string, blobInfo types.BlobInfo) error {
b, err := json.Marshal(blobInfo)
if err != nil {
return xerrors.Errorf("failed to marshal blob JSON: %w", err)
}
key := fmt.Sprintf("%s::%s::%s", redisPrefix, blobBucket, blobID)
if err := c.client.Set(context.TODO(), key, string(b), c.expiration).Err(); err != nil {
if err := c.client.Set(ctx, key, string(b), c.expiration).Err(); err != nil {
return xerrors.Errorf("unable to store blob information in Redis cache (%s): %w", blobID, err)
}
return nil
}
func (c RedisCache) DeleteBlobs(blobIDs []string) error {
func (c RedisCache) DeleteBlobs(ctx context.Context, blobIDs []string) error {
var errs error
for _, blobID := range blobIDs {
key := fmt.Sprintf("%s::%s::%s", redisPrefix, blobBucket, blobID)
if err := c.client.Del(context.TODO(), key).Err(); err != nil {
if err := c.client.Del(ctx, key).Err(); err != nil {
errs = multierror.Append(errs, xerrors.Errorf("unable to delete blob %s: %w", blobID, err))
}
}
return errs
}
func (c RedisCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
func (c RedisCache) GetArtifact(ctx context.Context, artifactID string) (types.ArtifactInfo, error) {
key := fmt.Sprintf("%s::%s::%s", redisPrefix, artifactBucket, artifactID)
val, err := c.client.Get(context.TODO(), key).Bytes()
val, err := c.client.Get(ctx, key).Bytes()
if err == redis.Nil {
return types.ArtifactInfo{}, xerrors.Errorf("artifact (%s) is missing in Redis cache", artifactID)
} else if err != nil {
@@ -167,9 +167,9 @@ func (c RedisCache) GetArtifact(artifactID string) (types.ArtifactInfo, error) {
return info, nil
}
func (c RedisCache) GetBlob(blobID string) (types.BlobInfo, error) {
func (c RedisCache) GetBlob(ctx context.Context, blobID string) (types.BlobInfo, error) {
key := fmt.Sprintf("%s::%s::%s", redisPrefix, blobBucket, blobID)
val, err := c.client.Get(context.TODO(), key).Bytes()
val, err := c.client.Get(ctx, key).Bytes()
if err == redis.Nil {
return types.BlobInfo{}, xerrors.Errorf("blob (%s) is missing in Redis cache", blobID)
} else if err != nil {
@@ -183,11 +183,11 @@ func (c RedisCache) GetBlob(blobID string) (types.BlobInfo, error) {
return blobInfo, nil
}
func (c RedisCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []string, error) {
func (c RedisCache) MissingBlobs(ctx context.Context, artifactID string, blobIDs []string) (bool, []string, error) {
var missingArtifact bool
var missingBlobIDs []string
for _, blobID := range blobIDs {
blobInfo, err := c.GetBlob(blobID)
blobInfo, err := c.GetBlob(ctx, blobID)
if err != nil {
// error means cache missed blob info
missingBlobIDs = append(missingBlobIDs, blobID)
@@ -198,7 +198,7 @@ func (c RedisCache) MissingBlobs(artifactID string, blobIDs []string) (bool, []s
}
}
// get artifact info
artifactInfo, err := c.GetArtifact(artifactID)
artifactInfo, err := c.GetArtifact(ctx, artifactID)
// error means cache missed artifact info
if err != nil {
return true, missingBlobIDs, nil
@@ -213,9 +213,7 @@ func (c RedisCache) Close() error {
return c.client.Close()
}
func (c RedisCache) Clear() error {
ctx := context.Background()
func (c RedisCache) Clear(ctx context.Context) error {
for {
keys, cursor, err := c.client.Scan(ctx, 0, redisPrefix+"::*", 100).Result()
if err != nil {

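Because Clear now runs its scan-and-delete loop under the caller's context rather than context.Background(), a caller can bound it with a deadline. A hedged sketch; the Redis address is a placeholder and the constructor arguments mirror the tests below (no TLS material, TLS disabled, zero expiration).

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aquasecurity/trivy/pkg/cache"
)

func main() {
	// Placeholder address for illustration.
	c, err := cache.NewRedisCache("redis://localhost:6379", "", "", "", false, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// The SCAN/DEL loop inside Clear now respects this deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := c.Clear(ctx); err != nil {
		log.Printf("clear redis cache: %v", err)
	}
}
```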

@@ -69,7 +69,7 @@ func TestRedisCache_PutArtifact(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", addr), "", "", "", false, 0)
require.NoError(t, err)
err = c.PutArtifact(tt.args.artifactID, tt.args.artifactConfig)
err = c.PutArtifact(t.Context(), tt.args.artifactID, tt.args.artifactConfig)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return
@@ -155,7 +155,7 @@ func TestRedisCache_PutBlob(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", addr), "", "", "", false, 0)
require.NoError(t, err)
err = c.PutBlob(tt.args.blobID, tt.args.blobConfig)
err = c.PutBlob(t.Context(), tt.args.blobID, tt.args.blobConfig)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return
@@ -237,7 +237,7 @@ func TestRedisCache_GetArtifact(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", addr), "", "", "", false, 0)
require.NoError(t, err)
got, err := c.GetArtifact(tt.artifactID)
got, err := c.GetArtifact(t.Context(), tt.artifactID)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return
@@ -327,7 +327,7 @@ func TestRedisCache_GetBlob(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", addr), "", "", "", false, 0)
require.NoError(t, err)
got, err := c.GetBlob(tt.blobID)
got, err := c.GetBlob(t.Context(), tt.blobID)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return
@@ -436,7 +436,7 @@ func TestRedisCache_MissingBlobs(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", addr), "", "", "", false, 0)
require.NoError(t, err)
missingArtifact, missingBlobIDs, err := c.MissingBlobs(tt.args.artifactID, tt.args.blobIDs)
missingArtifact, missingBlobIDs, err := c.MissingBlobs(t.Context(), tt.args.artifactID, tt.args.blobIDs)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return
@@ -481,7 +481,7 @@ func TestRedisCache_Clear(t *testing.T) {
c, err := cache.NewRedisCache(fmt.Sprintf("redis://%s", s.Addr()), "", "", "", false, 0)
require.NoError(t, err)
require.NoError(t, c.Clear())
require.NoError(t, c.Clear(t.Context()))
for i := range 200 {
assert.False(t, s.Exists(fmt.Sprintf("fanal::key%d", i)))
}
@@ -535,7 +535,7 @@ func TestRedisCache_DeleteBlobs(t *testing.T) {
s.Set(tt.wantKey, "any string")
err = c.DeleteBlobs(tt.args.blobIDs)
err = c.DeleteBlobs(t.Context(), tt.args.blobIDs)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return

pkg/cache/remote.go vendored (8 changed lines)

@@ -44,7 +44,7 @@ func NewRemoteCache(ctx context.Context, opts RemoteOptions) *RemoteCache {
}
// PutArtifact sends artifact to remote client
func (c RemoteCache) PutArtifact(imageID string, artifactInfo types.ArtifactInfo) error {
func (c RemoteCache) PutArtifact(_ context.Context, imageID string, artifactInfo types.ArtifactInfo) error {
err := rpc.Retry(func() error {
var err error
_, err = c.client.PutArtifact(c.ctx, rpc.ConvertToRPCArtifactInfo(imageID, artifactInfo))
@@ -57,7 +57,7 @@ func (c RemoteCache) PutArtifact(imageID string, artifactInfo types.ArtifactInfo
}
// PutBlob sends blobInfo to remote client
func (c RemoteCache) PutBlob(diffID string, blobInfo types.BlobInfo) error {
func (c RemoteCache) PutBlob(_ context.Context, diffID string, blobInfo types.BlobInfo) error {
err := rpc.Retry(func() error {
var err error
_, err = c.client.PutBlob(c.ctx, rpc.ConvertToRPCPutBlobRequest(diffID, blobInfo))
@@ -70,7 +70,7 @@ func (c RemoteCache) PutBlob(diffID string, blobInfo types.BlobInfo) error {
}
// MissingBlobs fetches missing blobs from RemoteCache
func (c RemoteCache) MissingBlobs(imageID string, layerIDs []string) (bool, []string, error) {
func (c RemoteCache) MissingBlobs(_ context.Context, imageID string, layerIDs []string) (bool, []string, error) {
var layers *rpcCache.MissingBlobsResponse
err := rpc.Retry(func() error {
var err error
@@ -84,7 +84,7 @@ func (c RemoteCache) MissingBlobs(imageID string, layerIDs []string) (bool, []st
}
// DeleteBlobs removes blobs by IDs from RemoteCache
func (c RemoteCache) DeleteBlobs(blobIDs []string) error {
func (c RemoteCache) DeleteBlobs(_ context.Context, blobIDs []string) error {
err := rpc.Retry(func() error {
var err error
_, err = c.client.DeleteBlobs(c.ctx, rpc.ConvertToDeleteBlobsRequest(blobIDs))


@@ -150,7 +150,7 @@ func TestRemoteCache_PutArtifact(t *testing.T) {
ServerAddr: ts.URL,
CustomHeaders: tt.args.customHeaders,
})
err := c.PutArtifact(tt.args.imageID, tt.args.imageInfo)
err := c.PutArtifact(t.Context(), tt.args.imageID, tt.args.imageInfo)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return
@@ -212,7 +212,7 @@ func TestRemoteCache_PutBlob(t *testing.T) {
ServerAddr: ts.URL,
CustomHeaders: tt.args.customHeaders,
})
err := c.PutBlob(tt.args.diffID, tt.args.layerInfo)
err := c.PutBlob(t.Context(), tt.args.diffID, tt.args.layerInfo)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return
@@ -291,7 +291,7 @@ func TestRemoteCache_MissingBlobs(t *testing.T) {
ServerAddr: ts.URL,
CustomHeaders: tt.args.customHeaders,
})
gotMissingImage, gotMissingLayerIDs, err := c.MissingBlobs(tt.args.imageID, tt.args.layerIDs)
gotMissingImage, gotMissingLayerIDs, err := c.MissingBlobs(t.Context(), tt.args.imageID, tt.args.layerIDs)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return
@@ -344,7 +344,7 @@ func TestRemoteCache_PutArtifactInsecure(t *testing.T) {
ServerAddr: ts.URL,
CustomHeaders: nil,
})
err := c.PutArtifact(tt.args.imageID, tt.args.imageInfo)
err := c.PutArtifact(t.Context(), tt.args.imageID, tt.args.imageInfo)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr)
return


@@ -71,7 +71,7 @@ func cleanScanCache(ctx context.Context, opts flag.Options) error {
}
defer cleanup()
if err = c.Clear(); err != nil {
if err = c.Clear(ctx); err != nil {
return xerrors.Errorf("clear scan cache: %w", err)
}
return nil


@@ -1,6 +1,8 @@
package applier
import (
"context"
"github.com/samber/lo"
"golang.org/x/xerrors"
@@ -11,7 +13,7 @@ import (
// Applier defines operation to scan image layers
type Applier interface {
ApplyLayers(artifactID string, blobIDs []string) (detail ftypes.ArtifactDetail, err error)
ApplyLayers(ctx context.Context, artifactID string, blobIDs []string) (detail ftypes.ArtifactDetail, err error)
}
type applier struct {
@@ -22,11 +24,11 @@ func NewApplier(c cache.LocalArtifactCache) Applier {
return &applier{cache: c}
}
func (a *applier) ApplyLayers(imageID string, layerKeys []string) (ftypes.ArtifactDetail, error) {
func (a *applier) ApplyLayers(ctx context.Context, imageID string, layerKeys []string) (ftypes.ArtifactDetail, error) {
var layers []ftypes.BlobInfo
var layerInfoList ftypes.Layers
for _, key := range layerKeys {
blob, _ := a.cache.GetBlob(key) // nolint
blob, _ := a.cache.GetBlob(ctx, key) // nolint
if blob.SchemaVersion == 0 {
return ftypes.ArtifactDetail{}, xerrors.Errorf("layer cache missing: %s", key)
}
@@ -38,7 +40,7 @@ func (a *applier) ApplyLayers(imageID string, layerKeys []string) (ftypes.Artifa
mergedLayer := ApplyLayers(layers)
imageInfo, _ := a.cache.GetArtifact(imageID) // nolint
imageInfo, _ := a.cache.GetArtifact(ctx, imageID) // nolint
mergedLayer.ImageConfig = ftypes.ImageConfigDetail{
Packages: imageInfo.HistoryPackages,
Misconfiguration: imageInfo.Misconfiguration,

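The Applier interface picks up the same parameter, so a scanner's request context reaches the underlying cache reads. A minimal sketch, assuming the applier package lives at pkg/fanal/applier as in the upstream tree; the cache contents and IDs are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aquasecurity/trivy/pkg/cache"
	"github.com/aquasecurity/trivy/pkg/fanal/applier"
	"github.com/aquasecurity/trivy/pkg/fanal/types"
)

func main() {
	ctx := context.Background()

	// Seed a local cache with a minimal artifact and one layer blob.
	c := cache.NewMemoryCache()
	if err := c.PutArtifact(ctx, "sha256:artifact", types.ArtifactInfo{SchemaVersion: 1}); err != nil {
		log.Fatal(err)
	}
	if err := c.PutBlob(ctx, "sha256:layer", types.BlobInfo{SchemaVersion: 1}); err != nil {
		log.Fatal(err)
	}

	// ApplyLayers now receives the context and forwards it to GetArtifact/GetBlob.
	a := applier.NewApplier(c)
	detail, err := a.ApplyLayers(ctx, "sha256:artifact", []string{"sha256:layer"})
	if err != nil {
		// A blob this minimal may trigger sentinel errors such as an
		// undetected OS; real callers branch on them (see the scan service).
		log.Printf("apply layers: %v", err)
	}
	fmt.Printf("%+v\n", detail)
}
```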

@@ -43,11 +43,11 @@ func TestApplier_ApplyLayers(t *testing.T) {
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
SchemaVersion: 1,
Size: 1000,
Digest: "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02",
@@ -71,7 +71,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
SchemaVersion: 1,
Size: 2000,
Digest: "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5",
@@ -91,7 +91,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:24df0d4e20c0f42d3703bf1f1db2bdd77346c7956f74f423603d651e8e5ae8a7", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:24df0d4e20c0f42d3703bf1f1db2bdd77346c7956f74f423603d651e8e5ae8a7", types.BlobInfo{
SchemaVersion: 1,
Size: 3000,
Digest: "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203",
@@ -244,7 +244,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:3bb70bd5fb37e05b8ecaaace5d6a6b5ec7834037c07ecb5907355c23ab70352d", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:3bb70bd5fb37e05b8ecaaace5d6a6b5ec7834037c07ecb5907355c23ab70352d", types.ArtifactInfo{
SchemaVersion: 1,
HistoryPackages: types.Packages{
{
@@ -282,7 +282,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:531743b7098cb2aaf615641007a129173f63ed86ca32fe7b5a246a1c47286028", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:531743b7098cb2aaf615641007a129173f63ed86ca32fe7b5a246a1c47286028", types.BlobInfo{
SchemaVersion: 1,
Size: 1000,
Digest: "sha256:a187dde48cd289ac374ad8539930628314bc581a481cdb41409c9289419ddb72",
@@ -513,7 +513,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{}))
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{}))
return c
},
wantErr: "layer cache missing",
@@ -530,11 +530,11 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
SchemaVersion: 1,
Size: 1000,
Digest: "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02",
@@ -554,7 +554,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
SchemaVersion: 1,
Size: 2000,
Digest: "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5",
@@ -574,7 +574,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:24df0d4e20c0f42d3703bf1f1db2bdd77346c7956f74f423603d651e8e5ae8a7", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:24df0d4e20c0f42d3703bf1f1db2bdd77346c7956f74f423603d651e8e5ae8a7", types.BlobInfo{
SchemaVersion: 1,
Size: 3000,
Digest: "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203",
@@ -700,11 +700,11 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
SchemaVersion: 1,
OS: types.OS{
Family: "debian",
@@ -732,11 +732,11 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:4791503518dff090d6a82f7a5c1fd71c41146920e2562fb64308e17ab6834b7e", types.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", types.BlobInfo{
SchemaVersion: 1,
Size: 1000,
Digest: "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02",
@@ -772,7 +772,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
}))
require.NoError(t, c.PutBlob("sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5", types.BlobInfo{
SchemaVersion: 1,
Size: 2000,
Digest: "sha256:dffd9992ca398466a663c87c92cfea2a2db0ae0cf33fcb99da60eec52addbfc5",
@@ -931,11 +931,11 @@ func TestApplier_ApplyLayers(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:fb44d01953611ba18d43d88e158c25579d18eff42db671182245010620a283f3", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:fb44d01953611ba18d43d88e158c25579d18eff42db671182245010620a283f3", types.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:2615f175cf3da67c48c6542914744943ee5e9c253547b03e3cfe8aae605c3199", types.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:2615f175cf3da67c48c6542914744943ee5e9c253547b03e3cfe8aae605c3199", types.BlobInfo{
SchemaVersion: 1,
Size: 1000,
Digest: "sha256:fb44d01953611ba18d43d88e158c25579d18eff42db671182245010620a283f3",
@@ -1009,7 +1009,7 @@ func TestApplier_ApplyLayers(t *testing.T) {
a := applier.NewApplier(c)
got, err := a.ApplyLayers(tt.args.imageID, tt.args.layerIDs)
got, err := a.ApplyLayers(t.Context(), tt.args.imageID, tt.args.layerIDs)
if tt.wantErr != "" {
require.ErrorContains(t, err, tt.wantErr, tt.name)
return


@@ -130,7 +130,7 @@ func (a Artifact) Inspect(ctx context.Context) (ref artifact.Reference, err erro
// Parse histories and extract a list of "created_by"
layerKeyMap := a.consolidateCreatedBy(diffIDs, layerKeys, configFile)
missingImage, missingLayers, err := a.cache.MissingBlobs(imageKey, layerKeys)
missingImage, missingLayers, err := a.cache.MissingBlobs(ctx, imageKey, layerKeys)
if err != nil {
return artifact.Reference{}, xerrors.Errorf("unable to get missing layers: %w", err)
}
@@ -332,7 +332,7 @@ func (a Artifact) inspect(ctx context.Context, missingImage string, layerKeys, b
if err != nil {
return nil, xerrors.Errorf("failed to analyze layer (%s): %w", layer.DiffID, err)
}
if err = a.cache.PutBlob(layerKey, layerInfo); err != nil {
if err = a.cache.PutBlob(ctx, layerKey, layerInfo); err != nil {
return nil, xerrors.Errorf("failed to store layer: %s in cache: %w", layerKey, err)
}
if lo.IsNotEmpty(layerInfo.OS) {
@@ -518,7 +518,7 @@ func (a Artifact) inspectConfig(ctx context.Context, imageID string, osFound typ
HistoryPackages: result.HistoryPackages,
}
if err := a.cache.PutArtifact(imageID, info); err != nil {
if err := a.cache.PutArtifact(ctx, imageID, info); err != nil {
return xerrors.Errorf("failed to put image info into the cache: %w", err)
}


@@ -493,7 +493,7 @@ func TestArtifact_Inspect(t *testing.T) {
},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:0bebf0773ffd87baa7c64fbdbdf79a24ae125e3f99a8adebe52d1ccbe6bed16b", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:0bebf0773ffd87baa7c64fbdbdf79a24ae125e3f99a8adebe52d1ccbe6bed16b", types.ArtifactInfo{
SchemaVersion: types.ArtifactJSONSchemaVersion,
}))
return c
@@ -1854,7 +1854,7 @@ func TestArtifact_Inspect(t *testing.T) {
},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:0bebf0773ffd87baa7c64fbdbdf79a24ae125e3f99a8adebe52d1ccbe6bed16b", types.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:0bebf0773ffd87baa7c64fbdbdf79a24ae125e3f99a8adebe52d1ccbe6bed16b", types.ArtifactInfo{
SchemaVersion: types.ArtifactJSONSchemaVersion,
}))
return c


@@ -173,7 +173,7 @@ func (a Artifact) Inspect(ctx context.Context) (artifact.Reference, error) {
// Check if the cache exists only when it's a clean git repository
if a.isClean && a.repoMetadata.Commit != "" {
_, missingBlobs, err := a.cache.MissingBlobs(cacheKey, []string{cacheKey})
_, missingBlobs, err := a.cache.MissingBlobs(ctx, cacheKey, []string{cacheKey})
if err != nil {
return artifact.Reference{}, xerrors.Errorf("unable to get missing blob: %w", err)
}
@@ -251,7 +251,7 @@ func (a Artifact) Inspect(ctx context.Context) (artifact.Reference, error) {
return artifact.Reference{}, xerrors.Errorf("failed to call hooks: %w", err)
}
if err = a.cache.PutBlob(cacheKey, blobInfo); err != nil {
if err = a.cache.PutBlob(ctx, cacheKey, blobInfo); err != nil {
return artifact.Reference{}, xerrors.Errorf("failed to store blob (%s) in cache: %w", cacheKey, err)
}
@@ -334,7 +334,7 @@ func (a Artifact) Clean(reference artifact.Reference) error {
if a.isClean && a.repoMetadata.Commit != "" {
return nil
}
return a.cache.DeleteBlobs(reference.BlobIDs)
return a.cache.DeleteBlobs(context.TODO(), reference.BlobIDs)
}
func (a Artifact) calcCacheKey() (string, error) {


@@ -268,7 +268,7 @@ func TestArtifact_Inspect(t *testing.T) {
}
// Store the blob info in the cache to test cache hit
cacheKey := "sha256:dc7c6039424c9fce969d3c2972d261af442a33f13e7494464386dbe280612d4c"
err := c.PutBlob(cacheKey, blobInfo)
err := c.PutBlob(t.Context(), cacheKey, blobInfo)
require.NoError(t, err)
},
want: artifact.Reference{
@@ -325,7 +325,7 @@ func TestArtifact_Inspect(t *testing.T) {
assert.Equal(t, tt.want, ref)
// Verify cache contents after inspection
blobInfo, err := c.GetBlob(tt.want.BlobIDs[0])
blobInfo, err := c.GetBlob(t.Context(), tt.want.BlobIDs[0])
require.NoError(t, err)
assert.Equal(t, tt.wantBlobInfo, &blobInfo, "cache content mismatch")
})


@@ -71,7 +71,7 @@ func (a Artifact) Inspect(ctx context.Context) (artifact.Reference, error) {
return artifact.Reference{}, xerrors.Errorf("failed to calculate a cache key: %w", err)
}
if err = a.cache.PutBlob(cacheKey, blobInfo); err != nil {
if err = a.cache.PutBlob(ctx, cacheKey, blobInfo); err != nil {
return artifact.Reference{}, xerrors.Errorf("failed to store blob (%s) in cache: %w", cacheKey, err)
}
@@ -96,7 +96,7 @@ func (a Artifact) Inspect(ctx context.Context) (artifact.Reference, error) {
}
func (a Artifact) Clean(reference artifact.Reference) error {
return a.cache.DeleteBlobs(reference.BlobIDs)
return a.cache.DeleteBlobs(context.TODO(), reference.BlobIDs)
}
func (a Artifact) calcCacheKey(blobInfo types.BlobInfo) (string, error) {


@@ -54,7 +54,7 @@ func (a *EBS) Inspect(ctx context.Context) (artifact.Reference, error) {
return artifact.Reference{}, xerrors.Errorf("cache key calculation error: %w", err)
}
if a.hasCache(cacheKey) {
if a.hasCache(ctx, cacheKey) {
return artifact.Reference{
Name: a.snapshotID,
Type: types.TypeVM,
@@ -68,7 +68,7 @@ func (a *EBS) Inspect(ctx context.Context) (artifact.Reference, error) {
return artifact.Reference{}, xerrors.Errorf("inspection error: %w", err)
}
if err = a.cache.PutBlob(cacheKey, blobInfo); err != nil {
if err = a.cache.PutBlob(ctx, cacheKey, blobInfo); err != nil {
return artifact.Reference{}, xerrors.Errorf("failed to store blob (%s) in cache: %w", cacheKey, err)
}
@@ -109,8 +109,8 @@ func (a *EBS) calcCacheKey(key string) (string, error) {
return s, nil
}
func (a *EBS) hasCache(cacheKey string) bool {
_, missingCacheKeys, err := a.cache.MissingBlobs(cacheKey, []string{cacheKey})
func (a *EBS) hasCache(ctx context.Context, cacheKey string) bool {
_, missingCacheKeys, err := a.cache.MissingBlobs(ctx, cacheKey, []string{cacheKey})
if err != nil {
a.logger.Debug("Unable to query missing cache", log.Err(err))
return false


@@ -89,7 +89,7 @@ func (a *ImageFile) Inspect(ctx context.Context) (artifact.Reference, error) {
return artifact.Reference{}, xerrors.Errorf("cache calculation error: %w", err)
}
if err = a.cache.PutBlob(cacheKey, blobInfo); err != nil {
if err = a.cache.PutBlob(ctx, cacheKey, blobInfo); err != nil {
return artifact.Reference{}, xerrors.Errorf("failed to store blob (%s) in cache: %w", cacheKey, err)
}
@@ -119,5 +119,5 @@ func (a *ImageFile) calcCacheKey(blobInfo types.BlobInfo) (string, error) {
func (a *ImageFile) Clean(reference artifact.Reference) error {
_ = a.file.Close()
return a.cache.DeleteBlobs(reference.BlobIDs)
return a.cache.DeleteBlobs(context.TODO(), reference.BlobIDs)
}


@@ -678,7 +678,7 @@ func localImageTestWithNamespace(t *testing.T, namespace string) {
require.NoError(t, err)
defer func() {
c.Clear()
c.Clear(t.Context())
c.Close()
}()
@@ -712,7 +712,7 @@ func localImageTestWithNamespace(t *testing.T, namespace string) {
require.Equal(t, tt.wantMetadata, ref.ImageMetadata)
a := applier.NewApplier(c)
got, err := a.ApplyLayers(ref.ID, ref.BlobIDs)
got, err := a.ApplyLayers(ctx, ref.ID, ref.BlobIDs)
require.NoError(t, err)
tag := strings.Split(tt.imageName, ":")[1]
@@ -813,7 +813,7 @@ func TestContainerd_PullImage(t *testing.T) {
require.NoError(t, err)
defer func() {
c.Clear()
c.Clear(t.Context())
c.Close()
}()
@@ -841,7 +841,7 @@ func TestContainerd_PullImage(t *testing.T) {
require.Equal(t, tt.wantMetadata, ref.ImageMetadata)
a := applier.NewApplier(c)
got, err := a.ApplyLayers(ref.ID, ref.BlobIDs)
got, err := a.ApplyLayers(ctx, ref.ID, ref.BlobIDs)
require.NoError(t, err)
// Parse a golden file


@@ -177,7 +177,7 @@ func TestFanal_Library_DockerMode(t *testing.T) {
}
// clear Cache
require.NoError(t, c.Clear(), tt.name)
require.NoError(t, c.Clear(t.Context()), tt.name)
})
}
}
@@ -208,7 +208,7 @@ func TestFanal_Library_TarMode(t *testing.T) {
runChecks(t, ctx, ar, applier, tt)
// clear Cache
require.NoError(t, c.Clear(), tt.name)
require.NoError(t, c.Close())
})
}
}
@@ -216,7 +216,7 @@ func TestFanal_Library_TarMode(t *testing.T) {
func runChecks(t *testing.T, ctx context.Context, ar artifact.Artifact, applier applier.Applier, tc testCase) {
imageInfo, err := ar.Inspect(ctx)
require.NoError(t, err, tc.name)
imageDetail, err := applier.ApplyLayers(imageInfo.ID, imageInfo.BlobIDs)
imageDetail, err := applier.ApplyLayers(ctx, imageInfo.ID, imageInfo.BlobIDs)
require.NoError(t, err, tc.name)
commonChecks(t, imageDetail, tc)
}


@@ -258,7 +258,7 @@ func analyze(t *testing.T, ctx context.Context, imageRef string, opt types.Image
}
defer ar.Clean(imageInfo)
imageDetail, err := ap.ApplyLayers(imageInfo.ID, imageInfo.BlobIDs)
imageDetail, err := ap.ApplyLayers(ctx, imageInfo.ID, imageInfo.BlobIDs)
if err != nil {
return nil, err
}


@@ -122,32 +122,33 @@ func NewCacheServer(c cache.Cache) *CacheServer {
}
// PutArtifact puts the artifacts in cache
func (s *CacheServer) PutArtifact(_ context.Context, in *rpcCache.PutArtifactRequest) (*emptypb.Empty, error) {
func (s *CacheServer) PutArtifact(ctx context.Context, in *rpcCache.PutArtifactRequest) (*emptypb.Empty, error) {
if in.ArtifactInfo == nil {
return nil, teeError(xerrors.Errorf("empty image info"))
}
imageInfo := rpc.ConvertFromRPCPutArtifactRequest(in)
if err := s.cache.PutArtifact(in.ArtifactId, imageInfo); err != nil {
if err := s.cache.PutArtifact(ctx, in.ArtifactId, imageInfo); err != nil {
return nil, teeError(xerrors.Errorf("unable to store image info in cache: %w", err))
}
return &emptypb.Empty{}, nil
}
// PutBlob puts the blobs in cache
func (s *CacheServer) PutBlob(_ context.Context, in *rpcCache.PutBlobRequest) (*emptypb.Empty, error) {
func (s *CacheServer) PutBlob(ctx context.Context, in *rpcCache.PutBlobRequest) (*emptypb.Empty, error) {
if in.BlobInfo == nil {
return nil, teeError(xerrors.Errorf("empty layer info"))
}
layerInfo := rpc.ConvertFromRPCPutBlobRequest(in)
if err := s.cache.PutBlob(in.DiffId, layerInfo); err != nil {
if err := s.cache.PutBlob(ctx, in.DiffId, layerInfo); err != nil {
return nil, teeError(xerrors.Errorf("unable to store layer info in cache: %w", err))
}
return &emptypb.Empty{}, nil
}
// MissingBlobs returns missing blobs from cache
func (s *CacheServer) MissingBlobs(_ context.Context, in *rpcCache.MissingBlobsRequest) (*rpcCache.MissingBlobsResponse, error) {
missingArtifact, blobIDs, err := s.cache.MissingBlobs(in.ArtifactId, in.BlobIds)
func (s *CacheServer) MissingBlobs(ctx context.Context, in *rpcCache.MissingBlobsRequest) (*rpcCache.MissingBlobsResponse, error) {
missingArtifact, blobIDs, err := s.cache.MissingBlobs(ctx, in.ArtifactId, in.BlobIds)
if err != nil {
return nil, teeError(xerrors.Errorf("failed to get missing blobs: %w", err))
}
@@ -158,9 +159,9 @@ func (s *CacheServer) MissingBlobs(_ context.Context, in *rpcCache.MissingBlobsR
}
// DeleteBlobs removes blobs by IDs
func (s *CacheServer) DeleteBlobs(_ context.Context, in *rpcCache.DeleteBlobsRequest) (*emptypb.Empty, error) {
func (s *CacheServer) DeleteBlobs(ctx context.Context, in *rpcCache.DeleteBlobsRequest) (*emptypb.Empty, error) {
blobIDs := rpc.ConvertFromDeleteBlobsRequest(in)
if err := s.cache.DeleteBlobs(blobIDs); err != nil {
if err := s.cache.DeleteBlobs(ctx, blobIDs); err != nil {
return nil, teeError(xerrors.Errorf("failed to remove a blobs: %w", err))
}
return &emptypb.Empty{}, nil


@@ -56,11 +56,11 @@ func TestScanServer_Scan(t *testing.T) {
fixtures: []string{"../../scan/local/testdata/fixtures/happy.yaml"},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203", ftypes.BlobInfo{
SchemaVersion: 1,
Size: 1000,
DiffID: "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203",
@@ -160,11 +160,11 @@ func TestScanServer_Scan(t *testing.T) {
fixtures: []string{"../../scan/local/testdata/fixtures/sad.yaml"},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
SchemaVersion: 1,
}))
require.NoError(t, c.PutBlob("sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203", ftypes.BlobInfo{
SchemaVersion: 1,
Digest: "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203",
DiffID: "sha256:beee9f30bc1f711043e78d4a2be0668955d4b761d587d6f60c2c8dc081efb203",
@@ -554,11 +554,11 @@ func TestCacheServer_MissingBlobs(t *testing.T) {
},
setUpCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutArtifact("sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
require.NoError(t, c.PutArtifact(t.Context(), "sha256:e7d92cdc71feacf90708cb59182d0df1b911f8ae022d29e8e95d75ca6a99776a", ftypes.ArtifactInfo{
SchemaVersion: ftypes.ArtifactJSONSchemaVersion,
}))
require.NoError(t, c.PutBlob("sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
}))


@@ -53,7 +53,7 @@ func NewService(a applier.Applier, osPkgScanner ospkg.Scanner, langPkgScanner la
// Scan scans the artifact and return results.
func (s Service) Scan(ctx context.Context, targetName, artifactKey string, blobKeys []string, options types.ScanOptions) (
types.ScanResponse, error) {
detail, err := s.applier.ApplyLayers(artifactKey, blobKeys)
detail, err := s.applier.ApplyLayers(ctx, artifactKey, blobKeys)
switch {
case errors.Is(err, analyzer.ErrUnknownOS):
log.Debug("OS is not detected.")


@@ -182,7 +182,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -303,7 +303,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -386,7 +386,7 @@ func TestScanner_Scan(t *testing.T) {
},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -479,7 +479,7 @@ func TestScanner_Scan(t *testing.T) {
},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -593,7 +593,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -683,7 +683,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Applications: []ftypes.Application{
{
@@ -795,7 +795,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -886,7 +886,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -968,7 +968,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:a6d503001157aedc826853f9b67f26d35966221b158bff03849868ae4a821116", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:a6d503001157aedc826853f9b67f26d35966221b158bff03849868ae4a821116", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
OS: ftypes.OS{},
}))
@@ -997,7 +997,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:0ea33a93585cf1917ba522b2304634c3073654062d5282c1346322967790ef33", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:0ea33a93585cf1917ba522b2304634c3073654062d5282c1346322967790ef33", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:0ea33a93585cf1917ba522b2304634c3073654062d5282c1346322967790ef33",
@@ -1098,7 +1098,7 @@ func TestScanner_Scan(t *testing.T) {
setUpHook: true,
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -1173,7 +1173,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:9922bc15eeefe1637b803ef2106f178152ce19a391f24aec838cbe2e48e73303",
@@ -1325,7 +1325,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: 0,
}))
return c
@@ -1346,7 +1346,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/sad.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",
@@ -1386,7 +1386,7 @@ func TestScanner_Scan(t *testing.T) {
fixtures: []string{"testdata/fixtures/happy.yaml"},
setupCache: func(t *testing.T) cache.Cache {
c := cache.NewMemoryCache()
require.NoError(t, c.PutBlob("sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
require.NoError(t, c.PutBlob(t.Context(), "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10", ftypes.BlobInfo{
SchemaVersion: ftypes.BlobJSONSchemaVersion,
Size: 1000,
DiffID: "sha256:5216338b40a7b96416b8b9858974bbe4acc3096ee60acbc4dfb1ee02aecceb10",