mirror of
https://github.com/aquasecurity/trivy.git
synced 2025-12-12 15:50:15 -08:00
feat: add post-analyzers (#3640)
Co-authored-by: DmitriyLewen <dmitriy.lewen@smartforce.io>
This commit is contained in:
@@ -3,6 +3,7 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -62,6 +63,7 @@ func TestModule(t *testing.T) {
|
||||
// Run Trivy
|
||||
err = execute(osArgs)
|
||||
assert.NoError(t, err)
|
||||
defer analyzer.DeregisterAnalyzer("spring4shell")
|
||||
|
||||
// Compare want and got
|
||||
compareReports(t, tt.golden, outputFile)
|
||||
|
||||
@@ -19,10 +19,13 @@ import (
|
||||
aos "github.com/aquasecurity/trivy/pkg/fanal/analyzer/os"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/log"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
"github.com/aquasecurity/trivy/pkg/mapfs"
|
||||
"github.com/aquasecurity/trivy/pkg/syncx"
|
||||
)
|
||||
|
||||
var (
|
||||
analyzers = map[Type]analyzer{}
|
||||
analyzers = map[Type]analyzer{}
|
||||
postAnalyzers = map[Type]postAnalyzerInitialize{}
|
||||
|
||||
// ErrUnknownOS occurs when unknown OS is analyzed.
|
||||
ErrUnknownOS = xerrors.New("unknown OS")
|
||||
@@ -39,6 +42,7 @@ var (
|
||||
// AnalyzerOptions is used to initialize analyzers
|
||||
type AnalyzerOptions struct {
|
||||
Group Group
|
||||
Slow bool
|
||||
FilePatterns []string
|
||||
DisabledAnalyzers []Type
|
||||
SecretScannerOption SecretScannerOption
|
||||
@@ -70,6 +74,13 @@ type analyzer interface {
|
||||
Required(filePath string, info os.FileInfo) bool
|
||||
}
|
||||
|
||||
type PostAnalyzer interface {
|
||||
Type() Type
|
||||
Version() int
|
||||
PostAnalyze(ctx context.Context, input PostAnalysisInput) (*AnalysisResult, error)
|
||||
Required(filePath string, info os.FileInfo) bool
|
||||
}
|
||||
|
||||
////////////////////
|
||||
// Analyzer group //
|
||||
////////////////////
|
||||
@@ -79,9 +90,21 @@ type Group string
|
||||
const GroupBuiltin Group = "builtin"
|
||||
|
||||
func RegisterAnalyzer(analyzer analyzer) {
|
||||
if _, ok := analyzers[analyzer.Type()]; ok {
|
||||
log.Logger.Fatalf("analyzer %s is registered twice", analyzer.Type())
|
||||
}
|
||||
analyzers[analyzer.Type()] = analyzer
|
||||
}
|
||||
|
||||
type postAnalyzerInitialize func(options AnalyzerOptions) PostAnalyzer
|
||||
|
||||
func RegisterPostAnalyzer(t Type, initializer postAnalyzerInitialize) {
|
||||
if _, ok := postAnalyzers[t]; ok {
|
||||
log.Logger.Fatalf("analyzer %s is registered twice", t)
|
||||
}
|
||||
postAnalyzers[t] = initializer
|
||||
}
|
||||
|
||||
// DeregisterAnalyzer is mainly for testing
|
||||
func DeregisterAnalyzer(t Type) {
|
||||
delete(analyzers, t)
|
||||
@@ -96,8 +119,9 @@ type CustomGroup interface {
|
||||
type Opener func() (dio.ReadSeekCloserAt, error)
|
||||
|
||||
type AnalyzerGroup struct {
|
||||
analyzers []analyzer
|
||||
filePatterns map[Type][]*regexp.Regexp
|
||||
analyzers []analyzer
|
||||
postAnalyzers []PostAnalyzer
|
||||
filePatterns map[Type][]*regexp.Regexp
|
||||
}
|
||||
|
||||
///////////////////////////
|
||||
@@ -113,6 +137,11 @@ type AnalysisInput struct {
|
||||
Options AnalysisOptions
|
||||
}
|
||||
|
||||
type PostAnalysisInput struct {
|
||||
FS fs.FS
|
||||
Options AnalysisOptions
|
||||
}
|
||||
|
||||
type AnalysisOptions struct {
|
||||
Offline bool
|
||||
}
|
||||
@@ -335,16 +364,36 @@ func NewAnalyzerGroup(opt AnalyzerOptions) (AnalyzerGroup, error) {
|
||||
group.analyzers = append(group.analyzers, a)
|
||||
}
|
||||
|
||||
for analyzerType, init := range postAnalyzers {
|
||||
a := init(opt)
|
||||
if !belongToGroup(groupName, analyzerType, opt.DisabledAnalyzers, a) {
|
||||
continue
|
||||
}
|
||||
group.postAnalyzers = append(group.postAnalyzers, a)
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
|
||||
|
||||
type Versions struct {
|
||||
Analyzers map[string]int
|
||||
PostAnalyzers map[string]int
|
||||
}
|
||||
|
||||
// AnalyzerVersions returns analyzer version identifier used for cache keys.
|
||||
func (ag AnalyzerGroup) AnalyzerVersions() map[string]int {
|
||||
versions := map[string]int{}
|
||||
func (ag AnalyzerGroup) AnalyzerVersions() Versions {
|
||||
analyzerVersions := map[string]int{}
|
||||
for _, a := range ag.analyzers {
|
||||
versions[string(a.Type())] = a.Version()
|
||||
analyzerVersions[string(a.Type())] = a.Version()
|
||||
}
|
||||
postAnalyzerVersions := map[string]int{}
|
||||
for _, a := range ag.postAnalyzers {
|
||||
postAnalyzerVersions[string(a.Type())] = a.Version()
|
||||
}
|
||||
return Versions{
|
||||
Analyzers: analyzerVersions,
|
||||
PostAnalyzers: postAnalyzerVersions,
|
||||
}
|
||||
return versions
|
||||
}
|
||||
|
||||
func (ag AnalyzerGroup) AnalyzeFile(ctx context.Context, wg *sync.WaitGroup, limit *semaphore.Weighted, result *AnalysisResult,
|
||||
@@ -394,15 +443,47 @@ func (ag AnalyzerGroup) AnalyzeFile(ctx context.Context, wg *sync.WaitGroup, lim
|
||||
log.Logger.Debugf("Analysis error: %s", err)
|
||||
return
|
||||
}
|
||||
if ret != nil {
|
||||
result.Merge(ret)
|
||||
}
|
||||
result.Merge(ret)
|
||||
}(a, rc)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ag AnalyzerGroup) RequiredPostAnalyzers(filePath string, info os.FileInfo) []Type {
|
||||
var postAnalyzerTypes []Type
|
||||
for _, a := range ag.postAnalyzers {
|
||||
if a.Required(filePath, info) {
|
||||
postAnalyzerTypes = append(postAnalyzerTypes, a.Type())
|
||||
}
|
||||
}
|
||||
return postAnalyzerTypes
|
||||
}
|
||||
|
||||
func (ag AnalyzerGroup) PostAnalyze(ctx context.Context, files *syncx.Map[Type, *mapfs.FS], result *AnalysisResult, opts AnalysisOptions) error {
|
||||
for _, a := range ag.postAnalyzers {
|
||||
fsys, ok := files.Load(a.Type())
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
filteredFS, err := fsys.Filter(result.SystemInstalledFiles)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("unable to filter filesystem: %w", err)
|
||||
}
|
||||
|
||||
res, err := a.PostAnalyze(ctx, PostAnalysisInput{
|
||||
FS: filteredFS,
|
||||
Options: opts,
|
||||
})
|
||||
if err != nil {
|
||||
return xerrors.Errorf("post analysis error: %w", err)
|
||||
}
|
||||
result.Merge(res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ag AnalyzerGroup) filePatternMatch(analyzerType Type, filePath string) bool {
|
||||
for _, pattern := range ag.filePatterns[analyzerType] {
|
||||
if pattern.MatchString(filePath) {
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/imgconf/apk"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/ruby/bundler"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/alpine"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/ubuntu"
|
||||
@@ -503,17 +504,23 @@ func TestAnalyzerGroup_AnalyzerVersions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
disabled []analyzer.Type
|
||||
want map[string]int
|
||||
want analyzer.Versions
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
disabled: []analyzer.Type{},
|
||||
want: map[string]int{
|
||||
"alpine": 1,
|
||||
"apk-repo": 1,
|
||||
"apk": 2,
|
||||
"bundler": 1,
|
||||
"ubuntu": 1,
|
||||
want: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"apk-repo": 1,
|
||||
"apk": 2,
|
||||
"bundler": 1,
|
||||
"ubuntu": 1,
|
||||
"ubuntu-esm": 1,
|
||||
},
|
||||
PostAnalyzers: map[string]int{
|
||||
"jar": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -522,10 +529,15 @@ func TestAnalyzerGroup_AnalyzerVersions(t *testing.T) {
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeApkRepo,
|
||||
analyzer.TypeUbuntu,
|
||||
analyzer.TypeUbuntuESM,
|
||||
analyzer.TypeJar,
|
||||
},
|
||||
want: map[string]int{
|
||||
"apk": 2,
|
||||
"bundler": 1,
|
||||
want: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"apk": 2,
|
||||
"bundler": 1,
|
||||
},
|
||||
PostAnalyzers: map[string]int{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -91,12 +91,14 @@ func NewConfigAnalyzerGroup(opts ConfigAnalyzerOptions) (ConfigAnalyzerGroup, er
|
||||
}
|
||||
|
||||
// AnalyzerVersions returns analyzer version identifier used for cache keys.
|
||||
func (ag *ConfigAnalyzerGroup) AnalyzerVersions() map[string]int {
|
||||
func (ag *ConfigAnalyzerGroup) AnalyzerVersions() Versions {
|
||||
versions := map[string]int{}
|
||||
for _, ca := range ag.configAnalyzers {
|
||||
versions[string(ca.Type())] = ca.Version()
|
||||
}
|
||||
return versions
|
||||
return Versions{
|
||||
Analyzers: versions,
|
||||
}
|
||||
}
|
||||
|
||||
func (ag *ConfigAnalyzerGroup) AnalyzeImageConfig(ctx context.Context, targetOS types.OS, config *v1.ConfigFile) *ConfigAnalysisResult {
|
||||
|
||||
@@ -126,14 +126,16 @@ func TestConfigAnalyzerGroup_AnalyzerVersions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
disabled []analyzer.Type
|
||||
want map[string]int
|
||||
want analyzer.Versions
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
disabled: []analyzer.Type{},
|
||||
want: map[string]int{
|
||||
"apk-command": 1,
|
||||
"test": 1,
|
||||
want: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"apk-command": 1,
|
||||
"test": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -142,8 +144,10 @@ func TestConfigAnalyzerGroup_AnalyzerVersions(t *testing.T) {
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeApkCommand,
|
||||
},
|
||||
want: map[string]int{
|
||||
"test": 1,
|
||||
want: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"test": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ const (
|
||||
TypeRedHatBase Type = "redhat"
|
||||
TypeSUSE Type = "suse"
|
||||
TypeUbuntu Type = "ubuntu"
|
||||
TypeUbuntuESM Type = "ubuntu-esm"
|
||||
|
||||
// OS Package
|
||||
TypeApk Type = "apk"
|
||||
|
||||
@@ -23,7 +23,7 @@ func Analyze(fileType, filePath string, r dio.ReadSeekerAt, parser godeptypes.Pa
|
||||
return ToAnalysisResult(fileType, filePath, "", parsedLibs, parsedDependencies), nil
|
||||
}
|
||||
|
||||
func ToAnalysisResult(fileType, filePath, libFilePath string, libs []godeptypes.Library, depGraph []godeptypes.Dependency) *analyzer.AnalysisResult {
|
||||
func ToApplication(fileType, filePath, libFilePath string, libs []godeptypes.Library, depGraph []godeptypes.Dependency) *types.Application {
|
||||
if len(libs) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -61,11 +61,19 @@ func ToAnalysisResult(fileType, filePath, libFilePath string, libs []godeptypes.
|
||||
Locations: locs,
|
||||
})
|
||||
}
|
||||
apps := []types.Application{{
|
||||
|
||||
return &types.Application{
|
||||
Type: fileType,
|
||||
FilePath: filePath,
|
||||
Libraries: pkgs,
|
||||
}}
|
||||
|
||||
return &analyzer.AnalysisResult{Applications: apps}
|
||||
}
|
||||
}
|
||||
|
||||
func ToAnalysisResult(fileType, filePath, libFilePath string, libs []godeptypes.Library, depGraph []godeptypes.Dependency) *analyzer.AnalysisResult {
|
||||
app := ToApplication(fileType, filePath, libFilePath, libs, depGraph)
|
||||
if app == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &analyzer.AnalysisResult{Applications: []types.Application{*app}}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package jar
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -9,16 +10,18 @@ import (
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
dio "github.com/aquasecurity/go-dep-parser/pkg/io"
|
||||
"github.com/aquasecurity/go-dep-parser/pkg/java/jar"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer/language"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
"github.com/aquasecurity/trivy/pkg/javadb"
|
||||
"github.com/aquasecurity/trivy/pkg/log"
|
||||
"github.com/aquasecurity/trivy/pkg/parallel"
|
||||
)
|
||||
|
||||
func init() {
|
||||
analyzer.RegisterAnalyzer(&javaLibraryAnalyzer{})
|
||||
analyzer.RegisterPostAnalyzer(analyzer.TypeJar, newJavaLibraryAnalyzer)
|
||||
}
|
||||
|
||||
const version = 1
|
||||
@@ -34,9 +37,16 @@ var requiredExtensions = []string{
|
||||
type javaLibraryAnalyzer struct {
|
||||
once sync.Once
|
||||
client *javadb.DB
|
||||
slow bool
|
||||
}
|
||||
|
||||
func (a *javaLibraryAnalyzer) Analyze(_ context.Context, input analyzer.AnalysisInput) (*analyzer.AnalysisResult, error) {
|
||||
func newJavaLibraryAnalyzer(options analyzer.AnalyzerOptions) analyzer.PostAnalyzer {
|
||||
return &javaLibraryAnalyzer{
|
||||
slow: options.Slow,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *javaLibraryAnalyzer) PostAnalyze(ctx context.Context, input analyzer.PostAnalysisInput) (*analyzer.AnalysisResult, error) {
|
||||
// TODO: think about the sonatype API and "--offline"
|
||||
var err error
|
||||
a.once.Do(func() {
|
||||
@@ -57,13 +67,33 @@ func (a *javaLibraryAnalyzer) Analyze(_ context.Context, input analyzer.Analysis
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
p := jar.NewParser(a.client, jar.WithSize(input.Info.Size()), jar.WithFilePath(input.FilePath))
|
||||
libs, deps, err := p.Parse(input.Content)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("jar/war/ear/par parse error: %w", err)
|
||||
// It will be called on each JAR file
|
||||
onFile := func(path string, info fs.FileInfo, r dio.ReadSeekerAt) (*types.Application, error) {
|
||||
p := jar.NewParser(a.client, jar.WithSize(info.Size()), jar.WithFilePath(path))
|
||||
libs, deps, err := p.Parse(r)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("jar/war/ear/par parse error: %w", err)
|
||||
}
|
||||
|
||||
return language.ToApplication(types.Jar, path, path, libs, deps), nil
|
||||
}
|
||||
|
||||
return language.ToAnalysisResult(types.Jar, input.FilePath, input.FilePath, libs, deps), nil
|
||||
var apps []types.Application
|
||||
onResult := func(app *types.Application) error {
|
||||
if app == nil {
|
||||
return nil
|
||||
}
|
||||
apps = append(apps, *app)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = parallel.WalkDir(ctx, input.FS, ".", a.slow, onFile, onResult); err != nil {
|
||||
return nil, xerrors.Errorf("walk dir error: %w", err)
|
||||
}
|
||||
|
||||
return &analyzer.AnalysisResult{
|
||||
Applications: apps,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *javaLibraryAnalyzer) Required(filePath string, _ os.FileInfo) bool {
|
||||
|
||||
@@ -2,7 +2,9 @@ package jar
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/aquasecurity/trivy/pkg/mapfs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -126,22 +128,20 @@ func Test_javaLibraryAnalyzer_Analyze(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
f, err := os.Open(tt.inputFile)
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
stat, err := f.Stat()
|
||||
require.NoError(t, err)
|
||||
|
||||
// init java-trivy-db with skip update
|
||||
javadb.Init("testdata", defaultJavaDBRepository, true, false, false)
|
||||
|
||||
a := javaLibraryAnalyzer{}
|
||||
a := javaLibraryAnalyzer{slow: true}
|
||||
ctx := context.Background()
|
||||
got, err := a.Analyze(ctx, analyzer.AnalysisInput{
|
||||
FilePath: tt.inputFile,
|
||||
Info: stat,
|
||||
Content: f,
|
||||
|
||||
mfs := mapfs.New()
|
||||
err := mfs.MkdirAll(filepath.Dir(tt.inputFile), os.ModePerm)
|
||||
assert.NoError(t, err)
|
||||
err = mfs.WriteFile(tt.inputFile, tt.inputFile)
|
||||
assert.NoError(t, err)
|
||||
|
||||
got, err := a.PostAnalyze(ctx, analyzer.PostAnalysisInput{
|
||||
FS: mfs,
|
||||
})
|
||||
|
||||
if tt.wantErr != "" {
|
||||
|
||||
@@ -54,7 +54,7 @@ func (a ubuntuESMAnalyzer) Required(filePath string, _ os.FileInfo) bool {
|
||||
}
|
||||
|
||||
func (a ubuntuESMAnalyzer) Type() analyzer.Type {
|
||||
return analyzer.TypeUbuntu
|
||||
return analyzer.TypeUbuntuESM
|
||||
}
|
||||
|
||||
func (a ubuntuESMAnalyzer) Version() int {
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -21,7 +23,9 @@ import (
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/log"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/walker"
|
||||
"github.com/aquasecurity/trivy/pkg/mapfs"
|
||||
"github.com/aquasecurity/trivy/pkg/semaphore"
|
||||
"github.com/aquasecurity/trivy/pkg/syncx"
|
||||
)
|
||||
|
||||
type Artifact struct {
|
||||
@@ -49,6 +53,7 @@ func NewArtifact(img types.Image, c cache.ArtifactCache, opt artifact.Option) (a
|
||||
|
||||
a, err := analyzer.NewAnalyzerGroup(analyzer.AnalyzerOptions{
|
||||
Group: opt.AnalyzerGroup,
|
||||
Slow: opt.Slow,
|
||||
FilePatterns: opt.FilePatterns,
|
||||
DisabledAnalyzers: opt.DisabledAnalyzers,
|
||||
SecretScannerOption: opt.SecretScannerOption,
|
||||
@@ -234,7 +239,7 @@ func (a Artifact) inspect(ctx context.Context, missingImage string, layerKeys, b
|
||||
|
||||
layerInfo, err := a.inspectLayer(ctx, layer, disabledAnalyers)
|
||||
if err != nil {
|
||||
errCh <- xerrors.Errorf("failed to analyze layer: %s : %w", layerInfo.DiffID, err)
|
||||
errCh <- xerrors.Errorf("failed to analyze layer (%s): %w", layer.DiffID, err)
|
||||
return
|
||||
}
|
||||
if err = a.cache.PutBlob(layerKey, layerInfo); err != nil {
|
||||
@@ -282,11 +287,25 @@ func (a Artifact) inspectLayer(ctx context.Context, layerInfo LayerInfo, disable
|
||||
result := analyzer.NewAnalysisResult()
|
||||
limit := semaphore.New(a.artifactOption.Slow)
|
||||
|
||||
// Prepare filesystem for post analysis
|
||||
files := new(syncx.Map[analyzer.Type, *mapfs.FS])
|
||||
tmpDir, err := os.MkdirTemp("", "layers-*")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, xerrors.Errorf("mkdir temp error: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Walk a tar layer
|
||||
opqDirs, whFiles, err := a.walker.Walk(rc, func(filePath string, info os.FileInfo, opener analyzer.Opener) error {
|
||||
if err = a.analyzer.AnalyzeFile(ctx, &wg, limit, result, "", filePath, info, opener, disabled, opts); err != nil {
|
||||
return xerrors.Errorf("failed to analyze %s: %w", filePath, err)
|
||||
}
|
||||
|
||||
// Build filesystem for post analysis
|
||||
if err = a.buildFS(tmpDir, filePath, info, opener, files); err != nil {
|
||||
return xerrors.Errorf("failed to build filesystem: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -296,6 +315,11 @@ func (a Artifact) inspectLayer(ctx context.Context, layerInfo LayerInfo, disable
|
||||
// Wait for all the goroutine to finish.
|
||||
wg.Wait()
|
||||
|
||||
// Post-analysis
|
||||
if err = a.analyzer.PostAnalyze(ctx, files, result, opts); err != nil {
|
||||
return types.BlobInfo{}, xerrors.Errorf("post analysis error: %w", err)
|
||||
}
|
||||
|
||||
// Sort the analysis result for consistent results
|
||||
result.Sort()
|
||||
|
||||
@@ -326,6 +350,55 @@ func (a Artifact) inspectLayer(ctx context.Context, layerInfo LayerInfo, disable
|
||||
return blobInfo, nil
|
||||
}
|
||||
|
||||
// buildFS creates filesystem for post analysis
|
||||
func (a Artifact) buildFS(tmpDir, filePath string, info os.FileInfo, opener analyzer.Opener,
|
||||
files *syncx.Map[analyzer.Type, *mapfs.FS]) error {
|
||||
// Get all post-analyzers that want to analyze the file
|
||||
atypes := a.analyzer.RequiredPostAnalyzers(filePath, info)
|
||||
if len(atypes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a temporary file to which the file in the layer will be copied
|
||||
// so that all the files will not be loaded into memory
|
||||
f, err := os.CreateTemp(tmpDir, "layer-file-*")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create temp error: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Open a file in the layer
|
||||
r, err := opener()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("file open error: %w", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
// Copy file content into the temporary file
|
||||
if _, err = io.Copy(f, r); err != nil {
|
||||
return xerrors.Errorf("copy error: %w", err)
|
||||
}
|
||||
|
||||
if err = os.Chmod(f.Name(), info.Mode()); err != nil {
|
||||
return xerrors.Errorf("chmod error: %w", err)
|
||||
}
|
||||
|
||||
// Create fs.FS for each post-analyzer that wants to analyze the current file
|
||||
for _, at := range atypes {
|
||||
fsys, _ := files.LoadOrStore(at, mapfs.New())
|
||||
if dir := filepath.Dir(filePath); dir != "." {
|
||||
if err := fsys.MkdirAll(dir, os.ModePerm); err != nil && !errors.Is(err, fs.ErrExist) {
|
||||
return xerrors.Errorf("mapfs mkdir error: %w", err)
|
||||
}
|
||||
}
|
||||
err = fsys.WriteFile(filePath, f.Name())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("mapfs write error: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a Artifact) diffIDs(configFile *v1.ConfigFile) []string {
|
||||
if configFile == nil {
|
||||
return nil
|
||||
|
||||
@@ -170,18 +170,18 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
BlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
BlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
MissingArtifact: true,
|
||||
MissingBlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
MissingBlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
},
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3",
|
||||
BlobID: "sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -233,7 +233,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
putArtifactExpectations: []cache.ArtifactCachePutArtifactExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutArtifactArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
ArtifactInfo: types.ArtifactInfo{
|
||||
SchemaVersion: types.ArtifactJSONSchemaVersion,
|
||||
Architecture: "amd64",
|
||||
@@ -247,8 +247,8 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "../../test/testdata/alpine-311.tar.gz",
|
||||
Type: types.ArtifactContainerImage,
|
||||
ID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
BlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
ID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
BlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
ImageMetadata: types.ImageMetadata{
|
||||
ID: "sha256:a187dde48cd289ac374ad8539930628314bc581a481cdb41409c9289419ddb72",
|
||||
DiffIDs: []string{
|
||||
@@ -304,27 +304,27 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:a646bb11d39c149d4aaf9b888233048e0848304e5abd75667ea6f21d540d800c",
|
||||
ArtifactID: "sha256:33f9415ed2cd5a9cef5d5144333619745b9ec0f851f0684dd45fa79c6b26a650",
|
||||
BlobIDs: []string{
|
||||
"sha256:3bfd543b4467abb972bfeeaa7a7d75ee18e7c92f077600d48fd1532f09129b54",
|
||||
"sha256:9f8cf74dff8cad6bb4df8fc0fa81dca446bd6f44c2d811a167ffca34ab90a6f8",
|
||||
"sha256:7487a77e30f32bd00fc35322a7fa308686bf3da17bb63f6a2bb2e9398a9a0357",
|
||||
"sha256:9aead687f9207ee718401fef3174f8e5b9a99114f3bcdcae18ddc34d88ed4906",
|
||||
"sha256:0f64152e3c6ae87b21d4bdd1725bcf1acd4deb613e05a8b31b8c7631d4ac38a3",
|
||||
"sha256:d0baf11bfd2bb23d66b9168d4349290bd01fb45518c17107ee7c2793cde4eeb8",
|
||||
"sha256:8cf65b3504af552bf010ff9765a13abbd21a3b8203563ea9426d7964f2aee98a",
|
||||
"sha256:992b2404a25612b71887531933b4fb4cd6031ebb671df3fde834c5574d62958b",
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
MissingBlobIDs: []string{
|
||||
"sha256:3bfd543b4467abb972bfeeaa7a7d75ee18e7c92f077600d48fd1532f09129b54",
|
||||
"sha256:9f8cf74dff8cad6bb4df8fc0fa81dca446bd6f44c2d811a167ffca34ab90a6f8",
|
||||
"sha256:7487a77e30f32bd00fc35322a7fa308686bf3da17bb63f6a2bb2e9398a9a0357",
|
||||
"sha256:9aead687f9207ee718401fef3174f8e5b9a99114f3bcdcae18ddc34d88ed4906",
|
||||
"sha256:0f64152e3c6ae87b21d4bdd1725bcf1acd4deb613e05a8b31b8c7631d4ac38a3",
|
||||
"sha256:d0baf11bfd2bb23d66b9168d4349290bd01fb45518c17107ee7c2793cde4eeb8",
|
||||
"sha256:8cf65b3504af552bf010ff9765a13abbd21a3b8203563ea9426d7964f2aee98a",
|
||||
"sha256:992b2404a25612b71887531933b4fb4cd6031ebb671df3fde834c5574d62958b",
|
||||
},
|
||||
},
|
||||
},
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:3bfd543b4467abb972bfeeaa7a7d75ee18e7c92f077600d48fd1532f09129b54",
|
||||
BlobID: "sha256:0f64152e3c6ae87b21d4bdd1725bcf1acd4deb613e05a8b31b8c7631d4ac38a3",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -397,7 +397,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:9f8cf74dff8cad6bb4df8fc0fa81dca446bd6f44c2d811a167ffca34ab90a6f8",
|
||||
BlobID: "sha256:d0baf11bfd2bb23d66b9168d4349290bd01fb45518c17107ee7c2793cde4eeb8",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -478,7 +478,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:7487a77e30f32bd00fc35322a7fa308686bf3da17bb63f6a2bb2e9398a9a0357",
|
||||
BlobID: "sha256:8cf65b3504af552bf010ff9765a13abbd21a3b8203563ea9426d7964f2aee98a",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -511,7 +511,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:9aead687f9207ee718401fef3174f8e5b9a99114f3bcdcae18ddc34d88ed4906",
|
||||
BlobID: "sha256:992b2404a25612b71887531933b4fb4cd6031ebb671df3fde834c5574d62958b",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -588,12 +588,12 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "../../test/testdata/vuln-image.tar.gz",
|
||||
Type: types.ArtifactContainerImage,
|
||||
ID: "sha256:a646bb11d39c149d4aaf9b888233048e0848304e5abd75667ea6f21d540d800c",
|
||||
ID: "sha256:33f9415ed2cd5a9cef5d5144333619745b9ec0f851f0684dd45fa79c6b26a650",
|
||||
BlobIDs: []string{
|
||||
"sha256:3bfd543b4467abb972bfeeaa7a7d75ee18e7c92f077600d48fd1532f09129b54",
|
||||
"sha256:9f8cf74dff8cad6bb4df8fc0fa81dca446bd6f44c2d811a167ffca34ab90a6f8",
|
||||
"sha256:7487a77e30f32bd00fc35322a7fa308686bf3da17bb63f6a2bb2e9398a9a0357",
|
||||
"sha256:9aead687f9207ee718401fef3174f8e5b9a99114f3bcdcae18ddc34d88ed4906",
|
||||
"sha256:0f64152e3c6ae87b21d4bdd1725bcf1acd4deb613e05a8b31b8c7631d4ac38a3",
|
||||
"sha256:d0baf11bfd2bb23d66b9168d4349290bd01fb45518c17107ee7c2793cde4eeb8",
|
||||
"sha256:8cf65b3504af552bf010ff9765a13abbd21a3b8203563ea9426d7964f2aee98a",
|
||||
"sha256:992b2404a25612b71887531933b4fb4cd6031ebb671df3fde834c5574d62958b",
|
||||
},
|
||||
ImageMetadata: types.ImageMetadata{
|
||||
ID: "sha256:58701fd185bda36cab0557bb6438661831267aa4a9e0b54211c4d5317a48aff4",
|
||||
@@ -678,27 +678,27 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:a646bb11d39c149d4aaf9b888233048e0848304e5abd75667ea6f21d540d800c",
|
||||
ArtifactID: "sha256:33f9415ed2cd5a9cef5d5144333619745b9ec0f851f0684dd45fa79c6b26a650",
|
||||
BlobIDs: []string{
|
||||
"sha256:ef7f3617f4e698a7378c222861ad779caf39293eb75bc40a297feb0f04997773",
|
||||
"sha256:47b4982cb2f3465af796707b5d3204ecebff8904a7de34b994b0f349b00749e3",
|
||||
"sha256:4c59618ffe6b2dd606b9342bc7cf5673d87e562789b7f132e5234b2e7412a01f",
|
||||
"sha256:3943af1221bbd84efc398958163456fca57e1400c24eeb2db9e2ddad7c1f37c0",
|
||||
"sha256:ce763fafc4c45bc6311188adfcd8b932fa42553f3324bb9ec8649e5f7c3f9f14",
|
||||
"sha256:b3765fc11963a0c92cc8c8ef0c8a3c54c9a3111100ae69384049b2d7b15419ae",
|
||||
"sha256:1bd6f23a3c252702080dd0e524f9ef13d8ff918e15b322fd8b5c2ceb9f5b8b4f",
|
||||
"sha256:9589cedce50fd3d37c19f22a5653dece7a092edff293a598d15125eb2a4d8849",
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
MissingBlobIDs: []string{
|
||||
"sha256:ef7f3617f4e698a7378c222861ad779caf39293eb75bc40a297feb0f04997773",
|
||||
"sha256:47b4982cb2f3465af796707b5d3204ecebff8904a7de34b994b0f349b00749e3",
|
||||
"sha256:4c59618ffe6b2dd606b9342bc7cf5673d87e562789b7f132e5234b2e7412a01f",
|
||||
"sha256:3943af1221bbd84efc398958163456fca57e1400c24eeb2db9e2ddad7c1f37c0",
|
||||
"sha256:ce763fafc4c45bc6311188adfcd8b932fa42553f3324bb9ec8649e5f7c3f9f14",
|
||||
"sha256:b3765fc11963a0c92cc8c8ef0c8a3c54c9a3111100ae69384049b2d7b15419ae",
|
||||
"sha256:1bd6f23a3c252702080dd0e524f9ef13d8ff918e15b322fd8b5c2ceb9f5b8b4f",
|
||||
"sha256:9589cedce50fd3d37c19f22a5653dece7a092edff293a598d15125eb2a4d8849",
|
||||
},
|
||||
},
|
||||
},
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:ef7f3617f4e698a7378c222861ad779caf39293eb75bc40a297feb0f04997773",
|
||||
BlobID: "sha256:ce763fafc4c45bc6311188adfcd8b932fa42553f3324bb9ec8649e5f7c3f9f14",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -709,7 +709,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:47b4982cb2f3465af796707b5d3204ecebff8904a7de34b994b0f349b00749e3",
|
||||
BlobID: "sha256:b3765fc11963a0c92cc8c8ef0c8a3c54c9a3111100ae69384049b2d7b15419ae",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -720,7 +720,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:4c59618ffe6b2dd606b9342bc7cf5673d87e562789b7f132e5234b2e7412a01f",
|
||||
BlobID: "sha256:1bd6f23a3c252702080dd0e524f9ef13d8ff918e15b322fd8b5c2ceb9f5b8b4f",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -732,7 +732,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:3943af1221bbd84efc398958163456fca57e1400c24eeb2db9e2ddad7c1f37c0",
|
||||
BlobID: "sha256:9589cedce50fd3d37c19f22a5653dece7a092edff293a598d15125eb2a4d8849",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -746,12 +746,12 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "../../test/testdata/vuln-image.tar.gz",
|
||||
Type: types.ArtifactContainerImage,
|
||||
ID: "sha256:a646bb11d39c149d4aaf9b888233048e0848304e5abd75667ea6f21d540d800c",
|
||||
ID: "sha256:33f9415ed2cd5a9cef5d5144333619745b9ec0f851f0684dd45fa79c6b26a650",
|
||||
BlobIDs: []string{
|
||||
"sha256:ef7f3617f4e698a7378c222861ad779caf39293eb75bc40a297feb0f04997773",
|
||||
"sha256:47b4982cb2f3465af796707b5d3204ecebff8904a7de34b994b0f349b00749e3",
|
||||
"sha256:4c59618ffe6b2dd606b9342bc7cf5673d87e562789b7f132e5234b2e7412a01f",
|
||||
"sha256:3943af1221bbd84efc398958163456fca57e1400c24eeb2db9e2ddad7c1f37c0",
|
||||
"sha256:ce763fafc4c45bc6311188adfcd8b932fa42553f3324bb9ec8649e5f7c3f9f14",
|
||||
"sha256:b3765fc11963a0c92cc8c8ef0c8a3c54c9a3111100ae69384049b2d7b15419ae",
|
||||
"sha256:1bd6f23a3c252702080dd0e524f9ef13d8ff918e15b322fd8b5c2ceb9f5b8b4f",
|
||||
"sha256:9589cedce50fd3d37c19f22a5653dece7a092edff293a598d15125eb2a4d8849",
|
||||
},
|
||||
ImageMetadata: types.ImageMetadata{
|
||||
ID: "sha256:58701fd185bda36cab0557bb6438661831267aa4a9e0b54211c4d5317a48aff4",
|
||||
@@ -833,8 +833,8 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
imagePath: "../../test/testdata/alpine-311.tar.gz",
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
BlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
BlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
Err: xerrors.New("MissingBlobs failed"),
|
||||
@@ -847,17 +847,17 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
imagePath: "../../test/testdata/alpine-311.tar.gz",
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
BlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
BlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
MissingBlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
MissingBlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
},
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3",
|
||||
BlobID: "sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -915,18 +915,18 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
imagePath: "../../test/testdata/alpine-311.tar.gz",
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
BlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
BlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
Returns: cache.ArtifactCacheMissingBlobsReturns{
|
||||
MissingArtifact: true,
|
||||
MissingBlobIDs: []string{"sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3"},
|
||||
MissingBlobIDs: []string{"sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255"},
|
||||
},
|
||||
},
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:a07b19e0e0a4339c00d982f6d80f305cd9dbb98f88b3c74e57b97574b9ce9ba3",
|
||||
BlobID: "sha256:7499fcc2ebee2c9b403a67a4fdebbda1d0a846b66485c03f3b4d869c424f7255",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Digest: "",
|
||||
@@ -978,7 +978,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
putArtifactExpectations: []cache.ArtifactCachePutArtifactExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutArtifactArgs{
|
||||
ArtifactID: "sha256:059741cfbdc039e88e337d621e57e03e99b0e0a75df32f2027ebef13f839af65",
|
||||
ArtifactID: "sha256:c232b7d8ac8aa08aa767313d0b53084c4380d1c01a213a5971bdb039e6538313",
|
||||
ArtifactInfo: types.ArtifactInfo{
|
||||
SchemaVersion: types.ArtifactJSONSchemaVersion,
|
||||
Architecture: "amd64",
|
||||
|
||||
@@ -59,7 +59,7 @@ func TestArtifact_InspectRekorAttestation(t *testing.T) {
|
||||
putBlobExpectations: []cache.ArtifactCachePutBlobExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:8c90c68f385a8067778a200fd3e56e257d4d6dd563e519a7be65902ee0b6e861",
|
||||
BlobID: "sha256:9c23872047046e145f49fb5533b63ace0cbf819f5b68e33f69f4e9bbab4c517e",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -94,9 +94,9 @@ func TestArtifact_InspectRekorAttestation(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "test/image:10",
|
||||
Type: types.ArtifactCycloneDX,
|
||||
ID: "sha256:8c90c68f385a8067778a200fd3e56e257d4d6dd563e519a7be65902ee0b6e861",
|
||||
ID: "sha256:9c23872047046e145f49fb5533b63ace0cbf819f5b68e33f69f4e9bbab4c517e",
|
||||
BlobIDs: []string{
|
||||
"sha256:8c90c68f385a8067778a200fd3e56e257d4d6dd563e519a7be65902ee0b6e861",
|
||||
"sha256:9c23872047046e145f49fb5533b63ace0cbf819f5b68e33f69f4e9bbab4c517e",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -19,7 +21,9 @@ import (
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/walker"
|
||||
"github.com/aquasecurity/trivy/pkg/log"
|
||||
"github.com/aquasecurity/trivy/pkg/mapfs"
|
||||
"github.com/aquasecurity/trivy/pkg/semaphore"
|
||||
"github.com/aquasecurity/trivy/pkg/syncx"
|
||||
)
|
||||
|
||||
type Artifact struct {
|
||||
@@ -40,6 +44,7 @@ func NewArtifact(rootPath string, c cache.ArtifactCache, opt artifact.Option) (a
|
||||
|
||||
a, err := analyzer.NewAnalyzerGroup(analyzer.AnalyzerOptions{
|
||||
Group: opt.AnalyzerGroup,
|
||||
Slow: opt.Slow,
|
||||
FilePatterns: opt.FilePatterns,
|
||||
DisabledAnalyzers: opt.DisabledAnalyzers,
|
||||
SecretScannerOption: opt.SecretScannerOption,
|
||||
@@ -119,20 +124,29 @@ func (a Artifact) Inspect(ctx context.Context) (types.ArtifactReference, error)
|
||||
var wg sync.WaitGroup
|
||||
result := analyzer.NewAnalysisResult()
|
||||
limit := semaphore.New(a.artifactOption.Slow)
|
||||
opts := analyzer.AnalysisOptions{Offline: a.artifactOption.Offline}
|
||||
|
||||
// Prepare filesystem for post analysis
|
||||
files := new(syncx.Map[analyzer.Type, *mapfs.FS])
|
||||
|
||||
err := a.walker.Walk(a.rootPath, func(filePath string, info os.FileInfo, opener analyzer.Opener) error {
|
||||
directory := a.rootPath
|
||||
dir := a.rootPath
|
||||
|
||||
// When the directory is the same as the filePath, a file was given
|
||||
// instead of a directory, rewrite the file path and directory in this case.
|
||||
if filePath == "." {
|
||||
directory, filePath = filepath.Split(a.rootPath)
|
||||
dir, filePath = filepath.Split(a.rootPath)
|
||||
}
|
||||
|
||||
opts := analyzer.AnalysisOptions{Offline: a.artifactOption.Offline}
|
||||
if err := a.analyzer.AnalyzeFile(ctx, &wg, limit, result, directory, filePath, info, opener, nil, opts); err != nil {
|
||||
if err := a.analyzer.AnalyzeFile(ctx, &wg, limit, result, dir, filePath, info, opener, nil, opts); err != nil {
|
||||
return xerrors.Errorf("analyze file (%s): %w", filePath, err)
|
||||
}
|
||||
|
||||
// Build filesystem for post analysis
|
||||
if err := a.buildFS(dir, filePath, info, files); err != nil {
|
||||
return xerrors.Errorf("failed to build filesystem: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -142,6 +156,11 @@ func (a Artifact) Inspect(ctx context.Context) (types.ArtifactReference, error)
|
||||
// Wait for all the goroutine to finish.
|
||||
wg.Wait()
|
||||
|
||||
// Post-analysis
|
||||
if err = a.analyzer.PostAnalyze(ctx, files, result, opts); err != nil {
|
||||
return types.ArtifactReference{}, xerrors.Errorf("post analysis error: %w", err)
|
||||
}
|
||||
|
||||
// Sort the analysis result for consistent results
|
||||
result.Sort()
|
||||
|
||||
@@ -206,3 +225,26 @@ func (a Artifact) calcCacheKey(blobInfo types.BlobInfo) (string, error) {
|
||||
|
||||
return cacheKey, nil
|
||||
}
|
||||
|
||||
// buildFS creates filesystem for post analysis
|
||||
func (a Artifact) buildFS(dir, filePath string, info os.FileInfo, files *syncx.Map[analyzer.Type, *mapfs.FS]) error {
|
||||
// Get all post-analyzers that want to analyze the file
|
||||
atypes := a.analyzer.RequiredPostAnalyzers(filePath, info)
|
||||
if len(atypes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create fs.FS for each post-analyzer that wants to analyze the current file
|
||||
for _, at := range atypes {
|
||||
mfs, _ := files.LoadOrStore(at, mapfs.New())
|
||||
if d := filepath.Dir(filePath); d != "." {
|
||||
if err := mfs.MkdirAll(d, os.ModePerm); err != nil && !errors.Is(err, fs.ErrExist) {
|
||||
return xerrors.Errorf("mapfs mkdir error: %w", err)
|
||||
}
|
||||
}
|
||||
if err := mfs.WriteFile(filePath, filepath.Join(dir, filePath)); err != nil {
|
||||
return xerrors.Errorf("mapfs write error: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:7177f27ce94e21305ba8efe2ced3533ba9be66bd251aaa217615469a29ed86a9",
|
||||
BlobID: "sha256:40ca14c99b2b22a5f78c1d1a2cbfeeaa3243e3fe1cf150839209ca3b5a897e62",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -74,9 +74,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "host",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:7177f27ce94e21305ba8efe2ced3533ba9be66bd251aaa217615469a29ed86a9",
|
||||
ID: "sha256:40ca14c99b2b22a5f78c1d1a2cbfeeaa3243e3fe1cf150839209ca3b5a897e62",
|
||||
BlobIDs: []string{
|
||||
"sha256:7177f27ce94e21305ba8efe2ced3533ba9be66bd251aaa217615469a29ed86a9",
|
||||
"sha256:40ca14c99b2b22a5f78c1d1a2cbfeeaa3243e3fe1cf150839209ca3b5a897e62",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -90,7 +90,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:25af809c209a60d5c852a9cd0fe0ea853f12876b693b7e3a90ba36236976f16a",
|
||||
BlobID: "sha256:8a4332f0b77c97330369206f2e1d144bfa4cd58ccba42a61d3618da8267435c8",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
},
|
||||
@@ -100,9 +100,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "host",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:25af809c209a60d5c852a9cd0fe0ea853f12876b693b7e3a90ba36236976f16a",
|
||||
ID: "sha256:8a4332f0b77c97330369206f2e1d144bfa4cd58ccba42a61d3618da8267435c8",
|
||||
BlobIDs: []string{
|
||||
"sha256:25af809c209a60d5c852a9cd0fe0ea853f12876b693b7e3a90ba36236976f16a",
|
||||
"sha256:8a4332f0b77c97330369206f2e1d144bfa4cd58ccba42a61d3618da8267435c8",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -113,7 +113,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:7177f27ce94e21305ba8efe2ced3533ba9be66bd251aaa217615469a29ed86a9",
|
||||
BlobID: "sha256:40ca14c99b2b22a5f78c1d1a2cbfeeaa3243e3fe1cf150839209ca3b5a897e62",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -154,7 +154,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
BlobID: "sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Applications: []types.Application{
|
||||
@@ -176,9 +176,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/requirements.txt",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
ID: "sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
BlobIDs: []string{
|
||||
"sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
"sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -189,7 +189,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
BlobID: "sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
Applications: []types.Application{
|
||||
@@ -211,9 +211,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/requirements.txt",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
ID: "sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
BlobIDs: []string{
|
||||
"sha256:5733e6d01251440e3ce19f0171a43360c50d32205051b2889187b8dd00e8d515",
|
||||
"sha256:45358d29778e36270f6fafd84e45e175e7aae7c0101b72eef99cee6dc598f5d4",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -447,9 +447,9 @@ func TestTerraformMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/terraform/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:7695efb9660d47bc53851aea5ca7d7e1bb1c90c22a18e8fd37b6d0634a03b69d",
|
||||
ID: "sha256:8a71a56f26890a69857f7515953e466d4df7515af8de827a895e2a394cd4e250",
|
||||
BlobIDs: []string{
|
||||
"sha256:7695efb9660d47bc53851aea5ca7d7e1bb1c90c22a18e8fd37b6d0634a03b69d",
|
||||
"sha256:8a71a56f26890a69857f7515953e466d4df7515af8de827a895e2a394cd4e250",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -616,9 +616,9 @@ func TestTerraformMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/terraform/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:61728a22aeefbe2b0f30bdb01ee623cb16b64488eaa6e0b1d488a47b2bd4c3fb",
|
||||
ID: "sha256:8c3691ae9fee1a61cff411cb3c8337d5e9571ac6d5b40fba97f448983bfe8673",
|
||||
BlobIDs: []string{
|
||||
"sha256:61728a22aeefbe2b0f30bdb01ee623cb16b64488eaa6e0b1d488a47b2bd4c3fb",
|
||||
"sha256:8c3691ae9fee1a61cff411cb3c8337d5e9571ac6d5b40fba97f448983bfe8673",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -646,9 +646,9 @@ func TestTerraformMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/terraform/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
ID: "sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
BlobIDs: []string{
|
||||
"sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
"sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -756,9 +756,9 @@ func TestTerraformMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/terraform/passed/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:0e792318cb431f2306399f28038a09f7ccbe3cb46d77f13b9f4c5da74fd03c61",
|
||||
ID: "sha256:f5c729597d94109d375447f91db212baf1a13a75f02084432b5e650be5643961",
|
||||
BlobIDs: []string{
|
||||
"sha256:0e792318cb431f2306399f28038a09f7ccbe3cb46d77f13b9f4c5da74fd03c61",
|
||||
"sha256:f5c729597d94109d375447f91db212baf1a13a75f02084432b5e650be5643961",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -793,289 +793,289 @@ func TestCloudFormationMisconfigurationScan(t *testing.T) {
|
||||
want types.ArtifactReference
|
||||
}{
|
||||
{
|
||||
name: "single failure",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/single-failure/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/single-failure/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: 2,
|
||||
Misconfigurations: []types.Misconfiguration{
|
||||
{
|
||||
FileType: "cloudformation",
|
||||
FilePath: "main.yaml",
|
||||
Successes: types.MisconfResults{
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0176",
|
||||
Query: "data.builtin.aws.rds.aws0176.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0176",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS IAM Database Authentication Disabled",
|
||||
Description: "Ensure IAM Database Authentication is enabled for RDS database instances to manage database access",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the PostgreSQL and MySQL type RDS instances to enable IAM database authentication.",
|
||||
References: []string{"https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0177",
|
||||
Query: "data.builtin.aws.rds.aws0177.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0177",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Deletion Protection Disabled",
|
||||
Description: "Ensure deletion protection is enabled for RDS database instances.",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the RDS instances to enable deletion protection.",
|
||||
References: []string{"https://aws.amazon.com/about-aws/whats-new/2018/09/amazon-rds-now-provides-database-deletion-protection/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0180",
|
||||
Query: "data.builtin.aws.rds.aws0180.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0180",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Publicly Accessible",
|
||||
Description: "Ensures RDS instances are not launched into the public cloud.",
|
||||
Severity: "HIGH",
|
||||
RecommendedActions: "Remove the public endpoint from the RDS instance'",
|
||||
References: []string{"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "", Provider: "AWS", Service: "rds", StartLine: 0, EndLine: 0,
|
||||
Code: types.Code{Lines: []types.Line(nil)},
|
||||
}, Traces: []string(nil),
|
||||
},
|
||||
},
|
||||
Failures: types.MisconfResults{
|
||||
{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:3-6",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 3,
|
||||
EndLine: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:793d3e4cb82fa4d73e62267c358bd038b453fca36297064e5d240d5809ad241e",
|
||||
BlobIDs: []string{
|
||||
"sha256:793d3e4cb82fa4d73e62267c358bd038b453fca36297064e5d240d5809ad241e",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple failures",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/multiple-failures/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/multiple-failures/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: 2,
|
||||
Misconfigurations: []types.Misconfiguration{
|
||||
{
|
||||
FileType: "cloudformation",
|
||||
FilePath: "main.yaml",
|
||||
Successes: types.MisconfResults{
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0176",
|
||||
Query: "data.builtin.aws.rds.aws0176.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0176",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS IAM Database Authentication Disabled",
|
||||
Description: "Ensure IAM Database Authentication is enabled for RDS database instances to manage database access",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the PostgreSQL and MySQL type RDS instances to enable IAM database authentication.",
|
||||
References: []string{"https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0177",
|
||||
Query: "data.builtin.aws.rds.aws0177.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0177",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Deletion Protection Disabled",
|
||||
Description: "Ensure deletion protection is enabled for RDS database instances.",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the RDS instances to enable deletion protection.",
|
||||
References: []string{"https://aws.amazon.com/about-aws/whats-new/2018/09/amazon-rds-now-provides-database-deletion-protection/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0180",
|
||||
Query: "data.builtin.aws.rds.aws0180.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0180",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Publicly Accessible",
|
||||
Description: "Ensures RDS instances are not launched into the public cloud.",
|
||||
Severity: "HIGH",
|
||||
RecommendedActions: "Remove the public endpoint from the RDS instance'",
|
||||
References: []string{"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "", Provider: "AWS", Service: "rds", StartLine: 0, EndLine: 0,
|
||||
Code: types.Code{Lines: []types.Line(nil)},
|
||||
}, Traces: []string(nil),
|
||||
},
|
||||
},
|
||||
Failures: types.MisconfResults{
|
||||
types.MisconfResult{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:2-5",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 2,
|
||||
EndLine: 5,
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:6-9",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 6,
|
||||
EndLine: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:49edf1eecd461fd56eccb1221aaff26c0c5939f2d8128e9cb867cc8e7552b8aa",
|
||||
BlobIDs: []string{
|
||||
"sha256:49edf1eecd461fd56eccb1221aaff26c0c5939f2d8128e9cb867cc8e7552b8aa",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no results",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/no-results/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/no-results/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
BlobIDs: []string{
|
||||
"sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
},
|
||||
},
|
||||
},
|
||||
name: "single failure",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/single-failure/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/single-failure/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: 2,
|
||||
Misconfigurations: []types.Misconfiguration{
|
||||
{
|
||||
FileType: "cloudformation",
|
||||
FilePath: "main.yaml",
|
||||
Successes: types.MisconfResults{
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0176",
|
||||
Query: "data.builtin.aws.rds.aws0176.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0176",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS IAM Database Authentication Disabled",
|
||||
Description: "Ensure IAM Database Authentication is enabled for RDS database instances to manage database access",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the PostgreSQL and MySQL type RDS instances to enable IAM database authentication.",
|
||||
References: []string{"https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0177",
|
||||
Query: "data.builtin.aws.rds.aws0177.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0177",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Deletion Protection Disabled",
|
||||
Description: "Ensure deletion protection is enabled for RDS database instances.",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the RDS instances to enable deletion protection.",
|
||||
References: []string{"https://aws.amazon.com/about-aws/whats-new/2018/09/amazon-rds-now-provides-database-deletion-protection/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0180",
|
||||
Query: "data.builtin.aws.rds.aws0180.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0180",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Publicly Accessible",
|
||||
Description: "Ensures RDS instances are not launched into the public cloud.",
|
||||
Severity: "HIGH",
|
||||
RecommendedActions: "Remove the public endpoint from the RDS instance'",
|
||||
References: []string{"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "", Provider: "AWS", Service: "rds", StartLine: 0, EndLine: 0,
|
||||
Code: types.Code{Lines: []types.Line(nil)},
|
||||
}, Traces: []string(nil),
|
||||
},
|
||||
},
|
||||
Failures: types.MisconfResults{
|
||||
{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:3-6",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 3,
|
||||
EndLine: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:eb5454a2e393c2f7602d0905f29179767dbcf7a5e57ee23142acbbb9c748e511",
|
||||
BlobIDs: []string{
|
||||
"sha256:eb5454a2e393c2f7602d0905f29179767dbcf7a5e57ee23142acbbb9c748e511",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple failures",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/multiple-failures/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/multiple-failures/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: 2,
|
||||
Misconfigurations: []types.Misconfiguration{
|
||||
{
|
||||
FileType: "cloudformation",
|
||||
FilePath: "main.yaml",
|
||||
Successes: types.MisconfResults{
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0176",
|
||||
Query: "data.builtin.aws.rds.aws0176.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0176",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS IAM Database Authentication Disabled",
|
||||
Description: "Ensure IAM Database Authentication is enabled for RDS database instances to manage database access",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the PostgreSQL and MySQL type RDS instances to enable IAM database authentication.",
|
||||
References: []string{"https://docs.aws.amazon.com/neptune/latest/userguide/iam-auth.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0177",
|
||||
Query: "data.builtin.aws.rds.aws0177.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0177",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Deletion Protection Disabled",
|
||||
Description: "Ensure deletion protection is enabled for RDS database instances.",
|
||||
Severity: "MEDIUM",
|
||||
RecommendedActions: "Modify the RDS instances to enable deletion protection.",
|
||||
References: []string{"https://aws.amazon.com/about-aws/whats-new/2018/09/amazon-rds-now-provides-database-deletion-protection/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Provider: "AWS",
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0180",
|
||||
Query: "data.builtin.aws.rds.aws0180.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "N/A",
|
||||
AVDID: "AVD-AWS-0180",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "RDS Publicly Accessible",
|
||||
Description: "Ensures RDS instances are not launched into the public cloud.",
|
||||
Severity: "HIGH",
|
||||
RecommendedActions: "Remove the public endpoint from the RDS instance'",
|
||||
References: []string{"http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "", Provider: "AWS", Service: "rds", StartLine: 0, EndLine: 0,
|
||||
Code: types.Code{Lines: []types.Line(nil)},
|
||||
}, Traces: []string(nil),
|
||||
},
|
||||
},
|
||||
Failures: types.MisconfResults{
|
||||
types.MisconfResult{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:2-5",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 2,
|
||||
EndLine: 5,
|
||||
},
|
||||
},
|
||||
{
|
||||
Namespace: "user.something",
|
||||
Query: "data.user.something.deny",
|
||||
Message: "No buckets allowed!",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
ID: "TEST001",
|
||||
AVDID: "AVD-TEST-0001",
|
||||
Type: "CloudFormation Security Check",
|
||||
Title: "Test policy",
|
||||
Description: "This is a test policy.",
|
||||
Severity: "LOW",
|
||||
RecommendedActions: "Have a cup of tea.",
|
||||
References: []string{"https://trivy.dev/"},
|
||||
},
|
||||
CauseMetadata: types.CauseMetadata{
|
||||
Resource: "main.yaml:6-9",
|
||||
Provider: "Generic",
|
||||
Service: "general",
|
||||
StartLine: 6,
|
||||
EndLine: 9,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:9dfc40f988fcfbb90d6da1dedaad0fd83a652b7562e2cd2e4cb30afb72cdc93c",
|
||||
BlobIDs: []string{
|
||||
"sha256:9dfc40f988fcfbb90d6da1dedaad0fd83a652b7562e2cd2e4cb30afb72cdc93c",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no results",
|
||||
fields: fields{
|
||||
dir: "./testdata/misconfig/cloudformation/no-results/src",
|
||||
},
|
||||
artifactOpt: artifact.Option{
|
||||
MisconfScannerOption: config.ScannerOption{
|
||||
RegoOnly: true,
|
||||
Namespaces: []string{"user"},
|
||||
PolicyPaths: []string{"./testdata/misconfig/cloudformation/no-results/rego"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobIDAnything: true,
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
},
|
||||
},
|
||||
Returns: cache.ArtifactCachePutBlobReturns{},
|
||||
},
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
BlobIDs: []string{
|
||||
"sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "passed",
|
||||
fields: fields{
|
||||
@@ -1180,9 +1180,9 @@ func TestCloudFormationMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/cloudformation/passed/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:a923fba51d802d1634246662e2e674b4abbce3ed796c8cfd4839f287dfd9033e",
|
||||
ID: "sha256:f47eda75dac78ecc9ed4cd02143cdef7145000fc55064f9117ade7f92f55922f",
|
||||
BlobIDs: []string{
|
||||
"sha256:a923fba51d802d1634246662e2e674b4abbce3ed796c8cfd4839f287dfd9033e",
|
||||
"sha256:f47eda75dac78ecc9ed4cd02143cdef7145000fc55064f9117ade7f92f55922f",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1267,9 +1267,9 @@ func TestDockerfileMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/dockerfile/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:acf53660fed1eb7961e3c47f85c8f41a117f7df7a0c09221f6d84fc64737e361",
|
||||
ID: "sha256:80337e1de2fb019bd8e43c88cb532f4715cf58384063ef7c63ef5f55e7eb4a5c",
|
||||
BlobIDs: []string{
|
||||
"sha256:acf53660fed1eb7961e3c47f85c8f41a117f7df7a0c09221f6d84fc64737e361",
|
||||
"sha256:80337e1de2fb019bd8e43c88cb532f4715cf58384063ef7c63ef5f55e7eb4a5c",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1324,9 +1324,9 @@ func TestDockerfileMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/dockerfile/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:acf53660fed1eb7961e3c47f85c8f41a117f7df7a0c09221f6d84fc64737e361",
|
||||
ID: "sha256:80337e1de2fb019bd8e43c88cb532f4715cf58384063ef7c63ef5f55e7eb4a5c",
|
||||
BlobIDs: []string{
|
||||
"sha256:acf53660fed1eb7961e3c47f85c8f41a117f7df7a0c09221f6d84fc64737e361",
|
||||
"sha256:80337e1de2fb019bd8e43c88cb532f4715cf58384063ef7c63ef5f55e7eb4a5c",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1354,9 +1354,9 @@ func TestDockerfileMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/dockerfile/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
ID: "sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
BlobIDs: []string{
|
||||
"sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
"sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1413,9 +1413,9 @@ func TestDockerfileMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/dockerfile/passed/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:78a5071a951a980a53a0df7818384eda36fedd2a0237529a43e12979d3bf36f9",
|
||||
ID: "sha256:165d6b849191f10ab1e2834cea9da9decbd6bf005efdb2e4afcef6df0ec53955",
|
||||
BlobIDs: []string{
|
||||
"sha256:78a5071a951a980a53a0df7818384eda36fedd2a0237529a43e12979d3bf36f9",
|
||||
"sha256:165d6b849191f10ab1e2834cea9da9decbd6bf005efdb2e4afcef6df0ec53955",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1505,9 +1505,9 @@ func TestKubernetesMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/kubernetes/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:2a60e6a9b9cf1ab3c083f4e52a38d3c70026ab331b771d449b06f4ffd4b6f2dd",
|
||||
ID: "sha256:6502a485fddeaac944a70b7e25dec5a779ae7dc10a64dbb8acfc08bec5a207a0",
|
||||
BlobIDs: []string{
|
||||
"sha256:2a60e6a9b9cf1ab3c083f4e52a38d3c70026ab331b771d449b06f4ffd4b6f2dd",
|
||||
"sha256:6502a485fddeaac944a70b7e25dec5a779ae7dc10a64dbb8acfc08bec5a207a0",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1590,9 +1590,9 @@ func TestKubernetesMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/kubernetes/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:56675c845f72190c9a6277d51a0c8248768d5322ea0d92650d1cc179f20d920e",
|
||||
ID: "sha256:12db0860b146463e15a2e5143742c7268e1de1d3f3655f669891d7f532934734",
|
||||
BlobIDs: []string{
|
||||
"sha256:56675c845f72190c9a6277d51a0c8248768d5322ea0d92650d1cc179f20d920e",
|
||||
"sha256:12db0860b146463e15a2e5143742c7268e1de1d3f3655f669891d7f532934734",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1620,9 +1620,9 @@ func TestKubernetesMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/kubernetes/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:f1bc1b154a70ae2e1d94297ffcf721348d1975037ccd4a32f4f1157738cbe54d",
|
||||
ID: "sha256:20043d42935fe45a25fd24949d6efad9d7fd52674bad6b8d29a4af97ed485e7a",
|
||||
BlobIDs: []string{
|
||||
"sha256:f1bc1b154a70ae2e1d94297ffcf721348d1975037ccd4a32f4f1157738cbe54d",
|
||||
"sha256:20043d42935fe45a25fd24949d6efad9d7fd52674bad6b8d29a4af97ed485e7a",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1679,9 +1679,9 @@ func TestKubernetesMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/kubernetes/passed/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:64fd37028d8cb4aefa49d6fa8438fa3a7e08ca331bfdfad22faf91e31ca0ff29",
|
||||
ID: "sha256:7f3de08246eabb3277e4ec95e65d8e15b6fe4b50eb0414fd043690b94c08cbb3",
|
||||
BlobIDs: []string{
|
||||
"sha256:64fd37028d8cb4aefa49d6fa8438fa3a7e08ca331bfdfad22faf91e31ca0ff29",
|
||||
"sha256:7f3de08246eabb3277e4ec95e65d8e15b6fe4b50eb0414fd043690b94c08cbb3",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1773,7 +1773,7 @@ func TestAzureARMMisconfigurationScan(t *testing.T) {
|
||||
Service: "rds",
|
||||
},
|
||||
},
|
||||
{
|
||||
{
|
||||
Namespace: "builtin.aws.rds.aws0180",
|
||||
Query: "data.builtin.aws.rds.aws0180.deny",
|
||||
PolicyMetadata: types.PolicyMetadata{
|
||||
@@ -1825,9 +1825,9 @@ func TestAzureARMMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/azurearm/single-failure/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:50155d7398d717aac20a616af8ac17964d20a24f5423b868871005dfa2cf4a61",
|
||||
ID: "sha256:da3e9be7246410885dd6b8d994c5e757cd3f700367bd0f7790990c124cb69924",
|
||||
BlobIDs: []string{
|
||||
"sha256:50155d7398d717aac20a616af8ac17964d20a24f5423b868871005dfa2cf4a61",
|
||||
"sha256:da3e9be7246410885dd6b8d994c5e757cd3f700367bd0f7790990c124cb69924",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1963,9 +1963,9 @@ func TestAzureARMMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/azurearm/multiple-failures/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:e31c260a87a099d00acc76b7afe5d6a88e18c5e0fd26153d15e1b4f491b7c42c",
|
||||
ID: "sha256:6b50622ac08712437f5446e23da6d219ac279cc683526b76975dc2963c84f65d",
|
||||
BlobIDs: []string{
|
||||
"sha256:e31c260a87a099d00acc76b7afe5d6a88e18c5e0fd26153d15e1b4f491b7c42c",
|
||||
"sha256:6b50622ac08712437f5446e23da6d219ac279cc683526b76975dc2963c84f65d",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1993,9 +1993,9 @@ func TestAzureARMMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/azurearm/no-results/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
ID: "sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
BlobIDs: []string{
|
||||
"sha256:6612c1db6d6c52c11de53447264b552ee96bf9cc317de37b3374687a8fc4c4ac",
|
||||
"sha256:1694d46ecb8151fde496faca988441a78c4fe40ddb3049f4f59467282ab9853e",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -2102,9 +2102,9 @@ func TestAzureARMMisconfigurationScan(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/misconfig/azurearm/passed/src",
|
||||
Type: types.ArtifactFilesystem,
|
||||
ID: "sha256:e9289e2efc545895a2199fab4583d5f3ef52c20eda1afcf4b0505bb2014ba3e4",
|
||||
ID: "sha256:224e7796d0417367f334df40092b2910cab0d0e9ea2be0d90347b199c94e51ad",
|
||||
BlobIDs: []string{
|
||||
"sha256:e9289e2efc545895a2199fab4583d5f3ef52c20eda1afcf4b0505bb2014ba3e4",
|
||||
"sha256:224e7796d0417367f334df40092b2910cab0d0e9ea2be0d90347b199c94e51ad",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -184,9 +184,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: ts.URL + "/test.git",
|
||||
Type: types.ArtifactRemoteRepository,
|
||||
ID: "sha256:aae4252ed9791dd412258f6235fe6f39807ec3f5ee7ac229743c940f07aa8625",
|
||||
ID: "sha256:43256f1a50997b78fd91690ac248cde42d56ca996201a596282e9d84e1dccaeb",
|
||||
BlobIDs: []string{
|
||||
"sha256:aae4252ed9791dd412258f6235fe6f39807ec3f5ee7ac229743c940f07aa8625",
|
||||
"sha256:43256f1a50997b78fd91690ac248cde42d56ca996201a596282e9d84e1dccaeb",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
filePath: filepath.Join("testdata", "bom.json"),
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
BlobID: "sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -125,9 +125,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: filepath.Join("testdata", "bom.json"),
|
||||
Type: types.ArtifactCycloneDX,
|
||||
ID: "sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
ID: "sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
BlobIDs: []string{
|
||||
"sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
"sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -136,7 +136,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
filePath: filepath.Join("testdata", "sbom.cdx.intoto.jsonl"),
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
BlobID: "sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -232,9 +232,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: filepath.Join("testdata", "sbom.cdx.intoto.jsonl"),
|
||||
Type: types.ArtifactCycloneDX,
|
||||
ID: "sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
ID: "sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
BlobIDs: []string{
|
||||
"sha256:21f10e5ab97c37f6c4d6a45815cd5db10e9539d5db8614d3b1d8890111d7a2b8",
|
||||
"sha256:f02a38a70e35a84032402711b68c75c6aafa1f77a01506a8e974cefd40e9038b",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -248,7 +248,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
filePath: filepath.Join("testdata", "os-only-bom.json"),
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:05a4e94bb5503e437108210c90849a977ea0b9b83e4e8606aabc9647b2a5256c",
|
||||
BlobID: "sha256:033dc76e6daf7d8ba439d678dc7e33400687098f3e9f563f6975adf4eb440eee",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
|
||||
@@ -86,7 +86,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
filePath: "testdata/AmazonLinux2.img.gz",
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:bdff805a4b2a96074c549dbb7912f5089df1a484cf0919639ecdba437a959e90",
|
||||
BlobID: "sha256:4289951ca507f1d2e3e5428f018bde5e94684ee3f6e0aa7d72456b1283478178",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -106,7 +106,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
putArtifactExpectations: []cache.ArtifactCachePutArtifactExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutArtifactArgs{
|
||||
ArtifactID: "sha256:bdff805a4b2a96074c549dbb7912f5089df1a484cf0919639ecdba437a959e90",
|
||||
ArtifactID: "sha256:4289951ca507f1d2e3e5428f018bde5e94684ee3f6e0aa7d72456b1283478178",
|
||||
ArtifactInfo: types.ArtifactInfo{
|
||||
SchemaVersion: types.ArtifactJSONSchemaVersion,
|
||||
},
|
||||
@@ -117,9 +117,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "testdata/AmazonLinux2.img.gz",
|
||||
Type: types.ArtifactVM,
|
||||
ID: "sha256:bdff805a4b2a96074c549dbb7912f5089df1a484cf0919639ecdba437a959e90",
|
||||
ID: "sha256:4289951ca507f1d2e3e5428f018bde5e94684ee3f6e0aa7d72456b1283478178",
|
||||
BlobIDs: []string{
|
||||
"sha256:bdff805a4b2a96074c549dbb7912f5089df1a484cf0919639ecdba437a959e90",
|
||||
"sha256:4289951ca507f1d2e3e5428f018bde5e94684ee3f6e0aa7d72456b1283478178",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -128,13 +128,13 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
filePath: "ebs:ebs-012345",
|
||||
missingBlobsExpectation: cache.ArtifactCacheMissingBlobsExpectation{
|
||||
Args: cache.ArtifactCacheMissingBlobsArgs{
|
||||
ArtifactID: "sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f",
|
||||
BlobIDs: []string{"sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f"},
|
||||
ArtifactID: "sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73",
|
||||
BlobIDs: []string{"sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73"},
|
||||
},
|
||||
},
|
||||
putBlobExpectation: cache.ArtifactCachePutBlobExpectation{
|
||||
Args: cache.ArtifactCachePutBlobArgs{
|
||||
BlobID: "sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f",
|
||||
BlobID: "sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73",
|
||||
BlobInfo: types.BlobInfo{
|
||||
SchemaVersion: types.BlobJSONSchemaVersion,
|
||||
OS: types.OS{
|
||||
@@ -154,7 +154,7 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
putArtifactExpectations: []cache.ArtifactCachePutArtifactExpectation{
|
||||
{
|
||||
Args: cache.ArtifactCachePutArtifactArgs{
|
||||
ArtifactID: "sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f",
|
||||
ArtifactID: "sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73",
|
||||
ArtifactInfo: types.ArtifactInfo{
|
||||
SchemaVersion: types.ArtifactJSONSchemaVersion,
|
||||
},
|
||||
@@ -164,9 +164,9 @@ func TestArtifact_Inspect(t *testing.T) {
|
||||
want: types.ArtifactReference{
|
||||
Name: "ebs-012345",
|
||||
Type: types.ArtifactVM,
|
||||
ID: "sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f",
|
||||
ID: "sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73",
|
||||
BlobIDs: []string{
|
||||
"sha256:284fbc20c2224e9ffc9dbc2fa1cdc4138fcfd5c55763ecb737864c0ee0d8163f",
|
||||
"sha256:f26b9c7c836259bd2d11516c755a7aec8e94bbfa7588f98b491bc9b0ca03df73",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
9
pkg/fanal/cache/key.go
vendored
9
pkg/fanal/cache/key.go
vendored
@@ -5,13 +5,14 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/artifact"
|
||||
|
||||
"golang.org/x/mod/sumdb/dirhash"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/artifact"
|
||||
)
|
||||
|
||||
func CalcKey(id string, analyzerVersions, hookVersions map[string]int, artifactOpt artifact.Option) (string, error) {
|
||||
func CalcKey(id string, analyzerVersions analyzer.Versions, hookVersions map[string]int, artifactOpt artifact.Option) (string, error) {
|
||||
// Sort options for consistent results
|
||||
artifactOpt.Sort()
|
||||
artifactOpt.MisconfScannerOption.Sort()
|
||||
@@ -21,7 +22,7 @@ func CalcKey(id string, analyzerVersions, hookVersions map[string]int, artifactO
|
||||
// Write ID, analyzer/handler versions, skipped files/dirs and file patterns
|
||||
keyBase := struct {
|
||||
ID string
|
||||
AnalyzerVersions map[string]int
|
||||
AnalyzerVersions analyzer.Versions
|
||||
HookVersions map[string]int
|
||||
SkipFiles []string
|
||||
SkipDirs []string
|
||||
|
||||
103
pkg/fanal/cache/key_test.go
vendored
103
pkg/fanal/cache/key_test.go
vendored
@@ -1,6 +1,7 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -13,7 +14,7 @@ import (
|
||||
func TestCalcKey(t *testing.T) {
|
||||
type args struct {
|
||||
key string
|
||||
analyzerVersions map[string]int
|
||||
analyzerVersions analyzer.Versions
|
||||
hookVersions map[string]int
|
||||
skipFiles []string
|
||||
skipDirs []string
|
||||
@@ -31,124 +32,144 @@ func TestCalcKey(t *testing.T) {
|
||||
name: "happy path",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
hookVersions: map[string]int{
|
||||
"python-pkg": 1,
|
||||
},
|
||||
},
|
||||
want: "sha256:8060f9cc9ba29039785a7116ae874673ad7a6eab37170ee1375b4064a72343ae",
|
||||
want: "sha256:c720b502991465ea11929cfefc71cf4b5aeaa9a8c0ae59fdaf597f957f5cdb18",
|
||||
},
|
||||
{
|
||||
name: "with disabled analyzer",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 0,
|
||||
"redhat": 2,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 0,
|
||||
"redhat": 2,
|
||||
},
|
||||
},
|
||||
hookVersions: map[string]int{
|
||||
"python-pkg": 1,
|
||||
},
|
||||
},
|
||||
want: "sha256:e6a28d20a3a901377dcb836959c8ac268ec573735a5ba9c29112a1f6c5b1edd2",
|
||||
want: "sha256:d63724cc72729edd3c81205739d64fcb414a4e6345dd4dde7f0fe6bdd56bedf9",
|
||||
},
|
||||
{
|
||||
name: "with empty slice file patterns",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
patterns: []string{},
|
||||
},
|
||||
want: "sha256:d69f13df33f4c159b4ea54c1967384782fcefb5e2a19af35f4cd6d2896e9285e",
|
||||
want: "sha256:9f7afa4d27c4c4f371dc6bb47bcc09e7a4a00b1d870e8156f126e35d8f6522e6",
|
||||
},
|
||||
{
|
||||
name: "with single empty string in file patterns",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
patterns: []string{""},
|
||||
},
|
||||
want: "sha256:9b81e0bf3aa7809a0f41bc696f353fca5645bcb63b975ab30e23d81886df2e61",
|
||||
want: "sha256:bcfc5da13ef9bf0b85e719584800a010063474546f1051a781b78bd83de01102",
|
||||
},
|
||||
{
|
||||
name: "with single non empty string in file patterns",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
patterns: []string{"test"},
|
||||
},
|
||||
want: "sha256:7d91b2623ae4b5641a1f36efa59c774231efe8c28c27a03869894fd49b047fe8",
|
||||
want: "sha256:8c9750b8eca507628417f21d7db707a7876d2e22c3e75b13f31a795af4051c57",
|
||||
},
|
||||
{
|
||||
name: "with non empty followed by empty string in file patterns",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
patterns: []string{"test", ""},
|
||||
},
|
||||
want: "sha256:5c7f1555e95fc60cdaa7e92e99aee15ee7be356fad9e83f1c24a3be06713a5a8",
|
||||
want: "sha256:71abf09bf1422531e2838db692b80f9b9f48766f56b7d3d02aecdb36b019e103",
|
||||
},
|
||||
{
|
||||
name: "with non empty preceded by empty string in file patterns",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
patterns: []string{"", "test"},
|
||||
},
|
||||
want: "sha256:5c7f1555e95fc60cdaa7e92e99aee15ee7be356fad9e83f1c24a3be06713a5a8",
|
||||
want: "sha256:71abf09bf1422531e2838db692b80f9b9f48766f56b7d3d02aecdb36b019e103",
|
||||
},
|
||||
{
|
||||
name: "with policy",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
policy: []string{"testdata/policy"},
|
||||
},
|
||||
want: "sha256:96e90ded238ad2ea8e1fd53a4202247aa65b69ad5e2f9f60d883104865ca4821",
|
||||
want: "sha256:9602d5ef5af086112cc9fae8310390ed3fb79f4b309d8881b9807e379c8dfa57",
|
||||
},
|
||||
{
|
||||
name: "skip files and dirs",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
skipFiles: []string{"app/deployment.yaml"},
|
||||
skipDirs: []string{"usr/java"},
|
||||
policy: []string{"testdata/policy"},
|
||||
},
|
||||
want: "sha256:b92c36d74172cbe3b7c07e169d9f594cd7822e8e95cb7bc1cd957ac17be62a4a",
|
||||
want: "sha256:363f70f4ee795f250873caea11c2fc94ef12945444327e7e2f8a99e3884695e0",
|
||||
},
|
||||
{
|
||||
name: "with policy/non-existent dir",
|
||||
args: args{
|
||||
key: "sha256:5c534be56eca62e756ef2ef51523feda0f19cd7c15bb0c015e3d6e3ae090bf6e",
|
||||
analyzerVersions: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
analyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"alpine": 1,
|
||||
"debian": 1,
|
||||
},
|
||||
},
|
||||
policy: []string{"policydir"},
|
||||
},
|
||||
|
||||
227
pkg/mapfs/file.go
Normal file
227
pkg/mapfs/file.go
Normal file
@@ -0,0 +1,227 @@
|
||||
package mapfs
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/syncx"
|
||||
)
|
||||
|
||||
var separator = "/"
|
||||
|
||||
type file struct {
|
||||
path string // underlying file path
|
||||
stat fileStat
|
||||
files syncx.Map[string, *file]
|
||||
}
|
||||
|
||||
func (f *file) Open(name string) (fs.File, error) {
|
||||
if name == "" || name == "." {
|
||||
return f.open()
|
||||
}
|
||||
|
||||
if sub, err := f.getFile(name); err == nil && !sub.stat.IsDir() {
|
||||
return sub.open()
|
||||
}
|
||||
|
||||
return nil, &fs.PathError{
|
||||
Op: "open",
|
||||
Path: name,
|
||||
Err: fs.ErrNotExist,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *file) open() (fs.File, error) {
|
||||
return os.Open(f.path)
|
||||
}
|
||||
|
||||
func (f *file) Remove(name string) error {
|
||||
if name == "" || name == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
return f.removePath(name, false)
|
||||
}
|
||||
|
||||
func (f *file) RemoveAll(name string) error {
|
||||
if name == "" || name == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
return f.removePath(name, true)
|
||||
}
|
||||
|
||||
func (f *file) removePath(name string, recursive bool) error {
|
||||
parts := strings.Split(name, separator)
|
||||
if len(parts) == 1 {
|
||||
sub, ok := f.files.Load(name)
|
||||
if !ok {
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
if sub.files.Len() != 0 && !recursive {
|
||||
return fs.ErrInvalid
|
||||
}
|
||||
f.files.Delete(name)
|
||||
return nil
|
||||
}
|
||||
|
||||
sub, err := f.getFile(parts[0])
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !sub.stat.IsDir() {
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
|
||||
return sub.removePath(strings.Join(parts[1:], separator), recursive)
|
||||
}
|
||||
|
||||
func (f *file) getFile(name string) (*file, error) {
|
||||
if name == "" || name == "." {
|
||||
return f, nil
|
||||
}
|
||||
parts := strings.Split(name, separator)
|
||||
if len(parts) == 1 {
|
||||
f, ok := f.files.Load(name)
|
||||
if ok {
|
||||
return f, nil
|
||||
}
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
sub, ok := f.files.Load(parts[0])
|
||||
if !ok || !sub.stat.IsDir() {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
return sub.getFile(strings.Join(parts[1:], separator))
|
||||
}
|
||||
|
||||
// ReadDir lists the entries of the named directory, sorted by name.
// Directory entries come from the in-memory fileStat; regular files are
// stat'ed on the real filesystem so size/mode/mtime reflect disk state.
func (f *file) ReadDir(name string) ([]fs.DirEntry, error) {
	if name == "" || name == "." {
		var entries []fs.DirEntry
		var err error
		f.files.Range(func(name string, value *file) bool {
			if value.stat.IsDir() {
				entries = append(entries, &value.stat)
			} else {
				// NOTE: err is the outer variable; assigning it and
				// returning false aborts the Range so the error can be
				// reported after iteration.
				var fi os.FileInfo
				fi, err = os.Stat(value.path)
				if err != nil {
					return false
				}
				entries = append(entries, &fileStat{
					name:    name,
					size:    fi.Size(),
					mode:    fi.Mode(),
					modTime: fi.ModTime(),
					sys:     fi.Sys(),
				})
			}
			return true
		})
		if err != nil {
			return nil, xerrors.Errorf("range error: %w", err)
		}
		// Range iterates in random order; sort for a deterministic listing.
		sort.Slice(entries, func(i, j int) bool { return entries[i].Name() < entries[j].Name() })
		return entries, nil
	}

	// Descend one component and recurse.
	parts := strings.Split(name, separator)
	dir, ok := f.files.Load(parts[0])
	if !ok || !dir.stat.IsDir() {
		return nil, fs.ErrNotExist
	}
	return dir.ReadDir(strings.Join(parts[1:], separator))
}
|
||||
|
||||
func (f *file) MkdirAll(path string, perm fs.FileMode) error {
|
||||
parts := strings.Split(path, separator)
|
||||
|
||||
if path == "" || path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
sub, ok := f.files.Load(parts[0])
|
||||
if ok && !sub.stat.IsDir() {
|
||||
return fs.ErrExist
|
||||
} else if !ok {
|
||||
if perm&fs.ModeDir == 0 {
|
||||
perm |= fs.ModeDir
|
||||
}
|
||||
|
||||
sub = &file{
|
||||
stat: fileStat{
|
||||
name: parts[0],
|
||||
size: 0x100,
|
||||
modTime: time.Now(),
|
||||
mode: perm,
|
||||
},
|
||||
files: syncx.Map[string, *file]{},
|
||||
}
|
||||
f.files.Store(parts[0], sub)
|
||||
}
|
||||
|
||||
if len(parts) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return sub.MkdirAll(strings.Join(parts[1:], separator), perm)
|
||||
}
|
||||
|
||||
func (f *file) WriteFile(path, underlyingPath string) error {
|
||||
parts := strings.Split(path, separator)
|
||||
|
||||
if len(parts) == 1 {
|
||||
f.files.Store(parts[0], &file{
|
||||
path: underlyingPath,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
dir, ok := f.files.Load(parts[0])
|
||||
if !ok || !dir.stat.IsDir() {
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
|
||||
return dir.WriteFile(strings.Join(parts[1:], separator), underlyingPath)
|
||||
}
|
||||
|
||||
func (f *file) glob(pattern string) ([]string, error) {
|
||||
var entries []string
|
||||
parts := strings.Split(pattern, separator)
|
||||
|
||||
var err error
|
||||
f.files.Range(func(name string, sub *file) bool {
|
||||
if ok, err := filepath.Match(parts[0], name); err != nil {
|
||||
return false
|
||||
} else if ok {
|
||||
if len(parts) == 1 {
|
||||
entries = append(entries, name)
|
||||
} else {
|
||||
subEntries, err := sub.glob(strings.Join(parts[1:], separator))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, sub := range subEntries {
|
||||
entries = append(entries, strings.Join([]string{
|
||||
name,
|
||||
sub,
|
||||
}, separator))
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("range error: %w", err)
|
||||
}
|
||||
|
||||
sort.Strings(entries)
|
||||
return entries, nil
|
||||
}
|
||||
177
pkg/mapfs/fs.go
Normal file
177
pkg/mapfs/fs.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package mapfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/syncx"
|
||||
)
|
||||
|
||||
// allFS is the union of every io/fs capability interface FS implements;
// it exists only for the compile-time assertion below.
type allFS interface {
	fs.ReadFileFS
	fs.ReadDirFS
	fs.StatFS
	fs.GlobFS
	fs.SubFS
}

// Make sure FS implements all the interfaces
var _ allFS = &FS{}
|
||||
|
||||
// FS is an in-memory filesystem
type FS struct {
	root *file
}

// New creates a new filesystem
func New() *FS {
	return &FS{
		root: &file{
			stat: fileStat{
				name: ".",
				// 0x100 is a fixed placeholder size used for all
				// synthetic directories in this package.
				size:    0x100,
				modTime: time.Now(),
				mode:    0o0700 | fs.ModeDir,
			},
			files: syncx.Map[string, *file]{},
		},
	}
}
|
||||
|
||||
// Filter removes the specified skippedFiles and returns a new FS
|
||||
func (m *FS) Filter(skippedFiles []string) (*FS, error) {
|
||||
if len(skippedFiles) == 0 {
|
||||
return m, nil
|
||||
}
|
||||
newFS := New()
|
||||
err := fs.WalkDir(m, ".", func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d.IsDir() {
|
||||
return newFS.MkdirAll(path, d.Type().Perm())
|
||||
}
|
||||
|
||||
if slices.Contains(skippedFiles, path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := m.root.getFile(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("unable to get %s: %w", path, err)
|
||||
}
|
||||
return newFS.WriteFile(path, f.path)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("walk error", err)
|
||||
}
|
||||
|
||||
return newFS, nil
|
||||
|
||||
}
|
||||
|
||||
// Stat returns a FileInfo describing the file.
|
||||
func (m *FS) Stat(name string) (fs.FileInfo, error) {
|
||||
name = cleanPath(name)
|
||||
f, err := m.root.getFile(name)
|
||||
if err != nil {
|
||||
return nil, &fs.PathError{
|
||||
Op: "stat",
|
||||
Path: name,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
if f.stat.IsDir() {
|
||||
return &f.stat, nil
|
||||
}
|
||||
return os.Stat(f.path)
|
||||
}
|
||||
|
||||
// ReadDir reads the named directory
// and returns a list of directory entries sorted by filename.
// The name is normalized with cleanPath before lookup.
func (m *FS) ReadDir(name string) ([]fs.DirEntry, error) {
	return m.root.ReadDir(cleanPath(name))
}
|
||||
|
||||
// Open opens the named file for reading.
// The name is normalized with cleanPath before lookup.
func (m *FS) Open(name string) (fs.File, error) {
	return m.root.Open(cleanPath(name))
}
|
||||
|
||||
// WriteFile writes the specified bytes to the named file. If the file exists, it will be overwritten.
// Note: it registers the real file at underlyingPath under the virtual
// path; contents stay on disk and are read lazily.
func (m *FS) WriteFile(path, underlyingPath string) error {
	return m.root.WriteFile(cleanPath(path), underlyingPath)
}
|
||||
|
||||
// MkdirAll creates a directory named path,
// along with any necessary parents, and returns nil,
// or else returns an error.
// The permission bits perm (before umask) are used for all
// directories that MkdirAll creates.
// If path is already a directory, MkdirAll does nothing
// and returns nil.
func (m *FS) MkdirAll(path string, perm fs.FileMode) error {
	return m.root.MkdirAll(cleanPath(path), perm)
}
|
||||
|
||||
// ReadFile reads the named file and returns its contents.
|
||||
// A successful call returns a nil error, not io.EOF.
|
||||
// (Because ReadFile reads the whole file, the expected EOF
|
||||
// from the final Read is not treated as an error to be reported.)
|
||||
//
|
||||
// The caller is permitted to modify the returned byte slice.
|
||||
// This method should return a copy of the underlying data.
|
||||
func (m *FS) ReadFile(name string) ([]byte, error) {
|
||||
f, err := m.root.Open(cleanPath(name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
return io.ReadAll(f)
|
||||
}
|
||||
|
||||
// Sub returns an FS corresponding to the subtree rooted at dir.
|
||||
func (m *FS) Sub(dir string) (fs.FS, error) {
|
||||
d, err := m.root.getFile(cleanPath(dir))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FS{
|
||||
root: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// NOTE: unlike the other methods, the pattern is not passed through
// cleanPath before matching.
func (m *FS) Glob(pattern string) ([]string, error) {
	return m.root.glob(pattern)
}
|
||||
|
||||
// Remove deletes a file or directory from the filesystem
// Non-empty directories cannot be removed; use RemoveAll for that.
func (m *FS) Remove(path string) error {
	return m.root.Remove(cleanPath(path))
}
|
||||
|
||||
// RemoveAll deletes a file or directory and any children if present from the filesystem
func (m *FS) RemoveAll(path string) error {
	return m.root.RemoveAll(cleanPath(path))
}
|
||||
|
||||
// cleanPath normalizes a path: it lexically cleans it and converts any
// OS-specific separators to forward slashes, the form mapfs uses
// internally.
func cleanPath(path string) string {
	return filepath.ToSlash(filepath.Clean(path))
}
|
||||
432
pkg/mapfs/fs_test.go
Normal file
432
pkg/mapfs/fs_test.go
Normal file
@@ -0,0 +1,432 @@
|
||||
package mapfs_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/mapfs"
|
||||
)
|
||||
|
||||
func initFS(t *testing.T) *mapfs.FS {
|
||||
fsys := mapfs.New()
|
||||
require.NoError(t, fsys.MkdirAll("a/b/c", 0700))
|
||||
require.NoError(t, fsys.MkdirAll("a/b/empty", 0700))
|
||||
require.NoError(t, fsys.WriteFile("hello.txt", "testdata/hello.txt"))
|
||||
require.NoError(t, fsys.WriteFile("a/b/b.txt", "testdata/b.txt"))
|
||||
require.NoError(t, fsys.WriteFile("a/b/c/c.txt", "testdata/c.txt"))
|
||||
require.NoError(t, fsys.WriteFile("a/b/c/.dotfile", "testdata/dotfile"))
|
||||
return fsys
|
||||
}
|
||||
|
||||
// fileInfo is the subset of fs.FileInfo fields the tests compare against.
type fileInfo struct {
	name     string
	fileMode fs.FileMode
	isDir    bool
	size     int64
}
|
||||
|
||||
var (
	// Windows reports 0666 for regular files, Unix 0644.
	filePerm = lo.Ternary(runtime.GOOS == "windows", fs.FileMode(0666), fs.FileMode(0644))
	// Expected metadata for testdata/hello.txt ("hello world", 11 bytes).
	helloFileInfo = fileInfo{
		name:     "hello.txt",
		fileMode: filePerm,
		isDir:    false,
		size:     11,
	}
	// Expected metadata for testdata/b.txt ("bbb", 3 bytes).
	btxtFileInfo = fileInfo{
		name:     "b.txt",
		fileMode: filePerm,
		isDir:    false,
		size:     3,
	}
	// Expected metadata for the directory a/b/c; mapfs reports a fixed
	// size of 0x100 (256) for synthetic directories.
	cdirFileInfo = fileInfo{
		name:     "c",
		fileMode: fs.FileMode(0700) | fs.ModeDir,
		isDir:    true,
		size:     256,
	}
)
|
||||
|
||||
func assertFileInfo(t *testing.T, want fileInfo, got fs.FileInfo) {
|
||||
if got == nil {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, want.name, got.Name())
|
||||
assert.Equal(t, want.fileMode, got.Mode())
|
||||
assert.Equal(t, want.isDir, got.Mode().IsDir())
|
||||
assert.Equal(t, want.isDir, got.IsDir())
|
||||
assert.Equal(t, want.size, got.Size())
|
||||
}
|
||||
|
||||
// TestFS_Filter verifies that Filter drops exactly the listed files and
// keeps everything else, and that an empty list returns the same FS.
func TestFS_Filter(t *testing.T) {
	fsys := initFS(t)
	t.Run("empty files", func(t *testing.T) {
		newFS, err := fsys.Filter(nil)
		require.NoError(t, err)
		assert.Equal(t, fsys, newFS)
	})
	t.Run("happy", func(t *testing.T) {
		newFS, err := fsys.Filter([]string{
			"hello.txt",
			"a/b/c/.dotfile",
		})
		require.NoError(t, err)
		// Filtered-out files are gone...
		_, err = newFS.Stat("hello.txt")
		require.ErrorIs(t, err, fs.ErrNotExist)
		_, err = newFS.Stat("a/b/c/.dotfile")
		require.ErrorIs(t, err, fs.ErrNotExist)
		// ...while unrelated files survive.
		fi, err := newFS.Stat("a/b/c/c.txt")
		require.NoError(t, err)
		assert.Equal(t, "c.txt", fi.Name())
	})
}
|
||||
|
||||
// TestFS_Stat covers Stat for regular files, nested files, directories,
// and the missing-file error path.
func TestFS_Stat(t *testing.T) {
	tests := []struct {
		name     string
		filePath string
		want     fileInfo
		wantErr  assert.ErrorAssertionFunc
	}{
		{
			name:     "regular file",
			filePath: "hello.txt",
			want:     helloFileInfo,
			wantErr:  assert.NoError,
		},
		{
			name:     "nested file",
			filePath: "a/b/b.txt",
			want:     btxtFileInfo,
			wantErr:  assert.NoError,
		},
		{
			name:     "dir",
			filePath: "a/b/c",
			want:     cdirFileInfo,
			wantErr:  assert.NoError,
		},
		{
			name:     "no such file",
			filePath: "nosuch.txt",
			wantErr:  assert.Error,
		},
	}

	for _, tt := range tests {
		// Fresh fixture per case so cases stay independent.
		fsys := initFS(t)
		t.Run(tt.name, func(t *testing.T) {
			got, err := fsys.Stat(tt.filePath)
			tt.wantErr(t, err)
			assertFileInfo(t, tt.want, got)
		})
	}
}
|
||||
|
||||
// TestFS_ReadDir checks directory listings at the root and in a nested
// directory (sorted order, mixed dirs/files, dotfiles), plus the
// missing-directory error path.
func TestFS_ReadDir(t *testing.T) {
	type dirEntry struct {
		name     string
		fileMode fs.FileMode
		isDir    bool
		size     int64
		fileInfo fileInfo
	}

	tests := []struct {
		name     string
		filePath string
		want     []dirEntry
		wantErr  assert.ErrorAssertionFunc
	}{
		{
			name:     "at root",
			filePath: ".",
			want: []dirEntry{
				{
					name:     "a",
					fileMode: fs.FileMode(0700) | fs.ModeDir,
					isDir:    true,
					size:     0x100,
					fileInfo: fileInfo{
						name:     "a",
						fileMode: fs.FileMode(0700) | fs.ModeDir,
						isDir:    true,
						size:     0x100,
					},
				},
				{
					name:     "hello.txt",
					fileMode: filePerm,
					isDir:    false,
					size:     11,
					fileInfo: helloFileInfo,
				},
			},
			wantErr: assert.NoError,
		},
		{
			name:     "multiple files",
			filePath: "a/b/c",
			want: []dirEntry{
				{
					name:     ".dotfile",
					fileMode: filePerm,
					isDir:    false,
					size:     7,
					fileInfo: fileInfo{
						name:     ".dotfile",
						fileMode: filePerm,
						isDir:    false,
						size:     7,
					},
				},
				{
					name:     "c.txt",
					fileMode: filePerm,
					isDir:    false,
					size:     0,
					fileInfo: fileInfo{
						name:     "c.txt",
						fileMode: filePerm,
						isDir:    false,
						size:     0,
					},
				},
			},
			wantErr: assert.NoError,
		},
		{
			name:     "no such dir",
			filePath: "nosuch/",
			wantErr:  assert.Error,
		},
	}

	for _, tt := range tests {
		fsys := initFS(t)
		t.Run(tt.name, func(t *testing.T) {
			entries, err := fsys.ReadDir(tt.filePath)
			tt.wantErr(t, err)

			// Zip2 pairs entries positionally with expectations; both the
			// DirEntry fields and the FileInfo from Info() are checked.
			for _, z := range lo.Zip2(entries, tt.want) {
				got, want := z.A, z.B
				assert.Equal(t, want.name, got.Name())
				assert.Equal(t, want.fileMode, got.Type(), want.name)
				assert.Equal(t, want.isDir, got.IsDir(), want.name)

				fi, err := got.Info()
				require.NoError(t, err)
				assertFileInfo(t, want.fileInfo, fi)
			}
		})
	}
}
|
||||
|
||||
// TestFS_Open verifies Open for a regular file (metadata and full body),
// and that directories and missing files fail to open.
func TestFS_Open(t *testing.T) {
	type file struct {
		fileInfo fileInfo
		body     string
	}

	tests := []struct {
		name     string
		filePath string
		want     file
		wantErr  assert.ErrorAssertionFunc
	}{
		{
			name:     "regular file",
			filePath: "hello.txt",
			want: file{
				fileInfo: helloFileInfo,
				body:     "hello world",
			},
			wantErr: assert.NoError,
		},
		{
			name:     "dir",
			filePath: "a/b/c",
			wantErr:  assert.Error,
		},
		{
			name:     "no such file",
			filePath: "nosuch.txt",
			wantErr:  assert.Error,
		},
	}

	for _, tt := range tests {
		fsys := initFS(t)
		t.Run(tt.name, func(t *testing.T) {
			f, err := fsys.Open(tt.filePath)
			tt.wantErr(t, err)
			// Error cases return a nil file; nothing more to check.
			if f == nil {
				return
			}
			defer func() {
				require.NoError(t, f.Close())
			}()

			fi, err := f.Stat()
			require.NoError(t, err)
			assertFileInfo(t, tt.want.fileInfo, fi)

			b, err := io.ReadAll(f)
			require.NoError(t, err)
			assert.Equal(t, tt.want.body, string(b))
		})
	}
}
|
||||
|
||||
func TestFS_ReadFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
filePath string
|
||||
want string
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "regular file",
|
||||
filePath: "hello.txt",
|
||||
want: "hello world",
|
||||
wantErr: assert.NoError,
|
||||
},
|
||||
{
|
||||
name: "no such file",
|
||||
filePath: "nosuch.txt",
|
||||
wantErr: assert.Error,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
fsys := initFS(t)
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b, err := fsys.ReadFile(tt.filePath)
|
||||
tt.wantErr(t, err)
|
||||
assert.Equal(t, tt.want, string(b))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFS_Sub(t *testing.T) {
|
||||
fsys := initFS(t)
|
||||
sub, err := fsys.Sub("a/b")
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := sub.(fs.ReadFileFS).ReadFile("c/.dotfile")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "dotfile", string(data))
|
||||
}
|
||||
|
||||
// TestFS_Glob covers single-level and hierarchical patterns and a
// pattern with no matches (which returns nil, not an error).
func TestFS_Glob(t *testing.T) {
	tests := []struct {
		name    string
		pattern string
		want    []string
		wantErr assert.ErrorAssertionFunc
	}{
		{
			name:    "root",
			pattern: "*",
			want: []string{
				"a",
				"hello.txt",
			},
			wantErr: assert.NoError,
		},
		{
			name:    "pattern",
			pattern: "*/b/c/*.txt",
			want: []string{
				"a/b/c/c.txt",
			},
			wantErr: assert.NoError,
		},
		{
			name:    "no such",
			pattern: "nosuch",
			wantErr: assert.NoError,
		},
	}

	for _, tt := range tests {
		fsys := initFS(t)
		t.Run(tt.name, func(t *testing.T) {
			results, err := fsys.Glob(tt.pattern)
			tt.wantErr(t, err)
			assert.Equal(t, tt.want, results)
		})
	}
}
|
||||
|
||||
// TestFS_Remove covers removing files, empty directories, the no-op
// empty path, and the error on removing a non-empty directory.
func TestFS_Remove(t *testing.T) {
	tests := []struct {
		name    string
		path    string
		wantErr assert.ErrorAssertionFunc
	}{
		{
			name:    "regular file",
			path:    "hello.txt",
			wantErr: assert.NoError,
		},
		{
			name:    "nested file",
			path:    "a/b/b.txt",
			wantErr: assert.NoError,
		},
		{
			name:    "empty dir",
			path:    "a/b/empty",
			wantErr: assert.NoError,
		},
		{
			name:    "empty path",
			path:    "",
			wantErr: assert.NoError,
		},
		{
			name:    "non-empty dir",
			path:    "a/b/c",
			wantErr: assert.Error,
		},
	}

	for _, tt := range tests {
		fsys := initFS(t)
		t.Run(tt.name, func(t *testing.T) {
			err := fsys.Remove(tt.path)
			tt.wantErr(t, err)
			// The post-check only applies to successful, non-no-op removals.
			if err != nil || tt.path == "" {
				return
			}

			_, err = fsys.Stat(tt.path)
			require.ErrorIs(t, err, fs.ErrNotExist)
		})
	}
}
|
||||
|
||||
// TestFS_RemoveAll verifies recursive deletion of a file and of a
// non-empty directory subtree.
func TestFS_RemoveAll(t *testing.T) {
	fsys := initFS(t)
	t.Run("regular file", func(t *testing.T) {
		err := fsys.RemoveAll("hello.txt")
		require.NoError(t, err)
		_, err = fsys.Stat("hello.txt")
		require.ErrorIs(t, err, fs.ErrNotExist)
	})
	t.Run("non-empty dir", func(t *testing.T) {
		err := fsys.RemoveAll("a/b")
		require.NoError(t, err)
		// Everything under a/b must be gone, not just the directory node.
		_, err = fsys.Stat("a/b/c/c.txt")
		require.ErrorIs(t, err, fs.ErrNotExist)
		_, err = fsys.Stat("a/b/c/.dotfile")
		require.ErrorIs(t, err, fs.ErrNotExist)
	})
}
|
||||
1
pkg/mapfs/testdata/b.txt
vendored
Normal file
1
pkg/mapfs/testdata/b.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
bbb
|
||||
0
pkg/mapfs/testdata/c.txt
vendored
Normal file
0
pkg/mapfs/testdata/c.txt
vendored
Normal file
1
pkg/mapfs/testdata/dotfile
vendored
Normal file
1
pkg/mapfs/testdata/dotfile
vendored
Normal file
@@ -0,0 +1 @@
|
||||
dotfile
|
||||
1
pkg/mapfs/testdata/hello.txt
vendored
Normal file
1
pkg/mapfs/testdata/hello.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
hello world
|
||||
26
pkg/mapfs/types.go
Normal file
26
pkg/mapfs/types.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package mapfs
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A fileStat is the implementation of FileInfo returned by Stat and Lstat.
|
||||
// Ported from https://github.com/golang/go/blob/518889b35cb07f3e71963f2ccfc0f96ee26a51ce/src/os/types_unix.go
|
||||
type fileStat struct {
|
||||
name string
|
||||
size int64
|
||||
mode fs.FileMode
|
||||
modTime time.Time
|
||||
sys any
|
||||
}
|
||||
|
||||
func (fs *fileStat) Name() string { return fs.name }
|
||||
func (fs *fileStat) Size() int64 { return fs.size }
|
||||
func (fs *fileStat) Mode() fs.FileMode { return fs.mode }
|
||||
func (fs *fileStat) IsDir() bool { return fs.mode.IsDir() }
|
||||
func (fs *fileStat) ModTime() time.Time { return fs.modTime }
|
||||
func (fs *fileStat) Sys() any { return &fs.sys }
|
||||
|
||||
func (fs *fileStat) Info() (fs.FileInfo, error) { return fs, nil }
|
||||
func (fs *fileStat) Type() fs.FileMode { return fs.mode }
|
||||
@@ -25,15 +25,18 @@ func TestManager_Register(t *testing.T) {
|
||||
name string
|
||||
noModuleDir bool
|
||||
moduleName string
|
||||
wantAnalyzerVersions map[string]int
|
||||
wantAnalyzerVersions analyzer.Versions
|
||||
wantPostScannerVersions map[string]int
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
moduleName: "happy",
|
||||
wantAnalyzerVersions: map[string]int{
|
||||
"happy": 1,
|
||||
wantAnalyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"happy": 1,
|
||||
},
|
||||
PostAnalyzers: map[string]int{},
|
||||
},
|
||||
wantPostScannerVersions: map[string]int{
|
||||
"happy": 1,
|
||||
@@ -42,24 +45,33 @@ func TestManager_Register(t *testing.T) {
|
||||
{
|
||||
name: "only analyzer",
|
||||
moduleName: "analyzer",
|
||||
wantAnalyzerVersions: map[string]int{
|
||||
"analyzer": 1,
|
||||
wantAnalyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{
|
||||
"analyzer": 1,
|
||||
},
|
||||
PostAnalyzers: map[string]int{},
|
||||
},
|
||||
wantPostScannerVersions: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "only post scanner",
|
||||
moduleName: "scanner",
|
||||
wantAnalyzerVersions: map[string]int{},
|
||||
name: "only post scanner",
|
||||
moduleName: "scanner",
|
||||
wantAnalyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{},
|
||||
PostAnalyzers: map[string]int{},
|
||||
},
|
||||
wantPostScannerVersions: map[string]int{
|
||||
"scanner": 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no module dir",
|
||||
noModuleDir: true,
|
||||
moduleName: "happy",
|
||||
wantAnalyzerVersions: map[string]int{},
|
||||
name: "no module dir",
|
||||
noModuleDir: true,
|
||||
moduleName: "happy",
|
||||
wantAnalyzerVersions: analyzer.Versions{
|
||||
Analyzers: map[string]int{},
|
||||
PostAnalyzers: map[string]int{},
|
||||
},
|
||||
wantPostScannerVersions: map[string]int{},
|
||||
},
|
||||
}
|
||||
@@ -104,8 +116,8 @@ func TestManager_Register(t *testing.T) {
|
||||
assert.Equal(t, tt.wantAnalyzerVersions, got)
|
||||
|
||||
// Confirm the post scanner is registered
|
||||
got = post.ScannerVersions()
|
||||
assert.Equal(t, tt.wantPostScannerVersions, got)
|
||||
gotScannerVersions := post.ScannerVersions()
|
||||
assert.Equal(t, tt.wantPostScannerVersions, gotScannerVersions)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
106
pkg/parallel/walk.go
Normal file
106
pkg/parallel/walk.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package parallel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
dio "github.com/aquasecurity/go-dep-parser/pkg/io"
|
||||
)
|
||||
|
||||
// onFile processes one regular file and produces a result of type T.
type onFile[T any] func(string, fs.FileInfo, dio.ReadSeekerAt) (T, error)

// onResult consumes one result produced by an onFile callback.
type onResult[T any] func(T) error

// WalkDir walks fsys from root, running onFile concurrently over every
// regular file (1 worker when slow, 10 otherwise) and feeding each
// result to onResult on the calling goroutine. The first error from the
// producer, any worker, or onResult cancels the whole walk.
func WalkDir[T any](ctx context.Context, fsys fs.FS, root string, slow bool,
	onFile onFile[T], onResult onResult[T]) error {

	g, ctx := errgroup.WithContext(ctx)
	paths := make(chan string)

	// Producer: enumerate regular files into the paths channel.
	g.Go(func() error {
		defer close(paths)
		err := fs.WalkDir(fsys, root, func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			} else if !d.Type().IsRegular() {
				return nil
			}

			// Abort promptly if another goroutine already failed.
			select {
			case paths <- path:
			case <-ctx.Done():
				return ctx.Err()
			}
			return nil
		})
		if err != nil {
			return xerrors.Errorf("walk error: %w", err)
		}
		return nil
	})

	// Start a fixed number of goroutines to read and digest files.
	c := make(chan T)
	limit := 10
	if slow {
		limit = 1
	}
	for i := 0; i < limit; i++ {
		g.Go(func() error {
			for path := range paths {
				if err := walk(ctx, fsys, path, c, onFile); err != nil {
					return err
				}
			}
			return nil
		})
	}
	// Close c once every producer/worker has finished so the consumer
	// loop below terminates; the error (if any) is re-checked after.
	go func() {
		_ = g.Wait()
		close(c)
	}()

	for res := range c {
		if err := onResult(res); err != nil {
			return err
		}
	}
	// Check whether any of the goroutines failed. Since g is accumulating the
	// errors, we don't need to send them (or check for them) in the individual
	// results sent on the channel.
	if err := g.Wait(); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func walk[T any](ctx context.Context, fsys fs.FS, path string, c chan T, onFile onFile[T]) error {
|
||||
f, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("file open error: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("stat error: %w", err)
|
||||
}
|
||||
|
||||
rsa, ok := f.(dio.ReadSeekerAt)
|
||||
if !ok {
|
||||
return xerrors.New("type assertion failed")
|
||||
}
|
||||
res, err := onFile(path, info, rsa)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("on file: %w", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case c <- res:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
46
pkg/syncx/sync.go
Normal file
46
pkg/syncx/sync.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package syncx
|
||||
|
||||
import "sync"
|
||||
|
||||
// Map is a strongly-typed wrapper around sync.Map. The zero value is
// empty and ready for use. Like sync.Map, it is safe for concurrent use.
type Map[K comparable, V any] struct {
	m sync.Map
}

// Delete removes the value stored for key, if any.
func (m *Map[K, V]) Delete(key K) { m.m.Delete(key) }

// Load returns the value stored for key, or the zero value of V when
// the key is absent; ok reports whether the key was present.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
	v, ok := m.m.Load(key)
	if !ok {
		return value, ok
	}
	return v.(V), ok
}

// LoadAndDelete deletes the value for key, returning the previous value
// if any; loaded reports whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	v, loaded := m.m.LoadAndDelete(key)
	if !loaded {
		return value, loaded
	}
	return v.(V), loaded
}

// LoadOrStore returns the existing value for key if present; otherwise
// it stores and returns value. loaded is true if the value was loaded,
// false if it was stored.
func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	a, loaded := m.m.LoadOrStore(key, value)
	return a.(V), loaded
}

// Range calls f sequentially for each key/value pair; f returning false
// stops the iteration.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	m.m.Range(func(key, value any) bool { return f(key.(K), value.(V)) })
}

// Store sets the value for key.
func (m *Map[K, V]) Store(key K, value V) { m.m.Store(key, value) }

// Len returns the length of the map
// (O(n): sync.Map has no length accessor, so it counts via Range).
func (m *Map[K, V]) Len() int {
	var i int
	// Consistency fix: use `any` and blank parameters instead of the
	// `interface{}` spelling used nowhere else in this file.
	m.m.Range(func(_, _ any) bool {
		i++
		return true
	})
	return i
}
|
||||
Reference in New Issue
Block a user