refactor(deps): Merge trivy-iac into Trivy (#6005)

Authored by simar7 on 2024-02-12 04:01:27 -07:00; committed by GitHub.
parent 535b5a96d9
commit 7bd3b630bb
777 changed files with 68473 additions and 45 deletions

.github/CODEOWNERS (1 line changed)

@@ -6,6 +6,7 @@ docs/docs/scanner/misconfiguration @knqyf263 @simar7
docs/docs/target/aws.md @knqyf263 @simar7
pkg/fanal/analyzer/config @knqyf263 @simar7
pkg/cloud @knqyf263 @simar7
+pkg/iac @knqyf263 @simar7
# Helm chart
helm/trivy/ @chen-keinan

go.mod (34 lines changed)

@@ -13,7 +13,7 @@ require (
github.com/NYTimes/gziphandler v1.1.1
github.com/alicebob/miniredis/v2 v2.31.1
github.com/aquasecurity/bolt-fixtures v0.0.0-20200903104109-d34e7f983986
-github.com/aquasecurity/defsec v0.94.1
+github.com/aquasecurity/defsec v0.94.2-0.20240119001230-c2d65f49dfeb
github.com/aquasecurity/go-dep-parser v0.0.0-20240208080026-8cc7d408bce4
github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce
github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798
@@ -25,7 +25,6 @@ require (
github.com/aquasecurity/tml v0.6.1
github.com/aquasecurity/trivy-aws v0.7.1
github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d
-github.com/aquasecurity/trivy-iac v0.8.0
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48
github.com/aquasecurity/trivy-kubernetes v0.6.3-0.20240118072219-c433b06f98e1
github.com/aquasecurity/trivy-policies v0.8.0
@@ -117,7 +116,22 @@ require (
modernc.org/sqlite v1.28.0
)
-require github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c
+require (
+github.com/apparentlymart/go-cidr v1.1.0
+github.com/aws/smithy-go v1.19.0
+github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c
+github.com/hashicorp/go-uuid v1.0.3
+github.com/hashicorp/hcl/v2 v2.19.1
+github.com/liamg/iamgo v0.0.9
+github.com/liamg/jfather v0.0.7
+github.com/liamg/memoryfs v1.6.0
+github.com/mitchellh/go-homedir v1.1.0
+github.com/olekukonko/tablewriter v0.0.5
+github.com/zclconf/go-cty v1.13.0
+github.com/zclconf/go-cty-yaml v1.0.3
+golang.org/x/crypto v0.18.0
+helm.sh/helm/v3 v3.14.0
+)
require (
cloud.google.com/go v0.110.10 // indirect
@@ -141,7 +155,6 @@ require (
github.com/Intevation/jsonpath v0.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
-github.com/Masterminds/semver v1.5.0 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -154,7 +167,6 @@ require (
github.com/alecthomas/chroma v0.10.0 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
-github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -204,7 +216,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 // indirect
-github.com/aws/smithy-go v1.19.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/briandowns/spinner v1.23.0 // indirect
@@ -281,11 +292,9 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
-github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
-github.com/hashicorp/hcl/v2 v2.19.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -299,9 +308,6 @@ require (
github.com/klauspost/compress v1.17.2 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
-github.com/liamg/iamgo v0.0.9 // indirect
-github.com/liamg/jfather v0.0.7 // indirect
-github.com/liamg/memoryfs v1.6.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 // indirect
@@ -313,7 +319,6 @@ require (
github.com/microsoft/go-rustaudit v0.0.0-20220808201409-204dfee52032 // indirect
github.com/miekg/dns v1.1.53 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
-github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -332,7 +337,6 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/oklog/ulid v1.3.1 // indirect
-github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
@@ -371,8 +375,6 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
github.com/yuin/gopher-lua v1.1.0 // indirect
-github.com/zclconf/go-cty v1.13.0 // indirect
-github.com/zclconf/go-cty-yaml v1.0.3 // indirect
go.mongodb.org/mongo-driver v1.13.1 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
@@ -383,7 +385,6 @@ require (
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
-golang.org/x/crypto v0.18.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
golang.org/x/sys v0.16.0 // indirect
@@ -400,7 +401,6 @@ require (
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
-helm.sh/helm/v3 v3.14.0 // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/apimachinery v0.29.1 // indirect
k8s.io/apiserver v0.29.0 // indirect

go.sum (8 lines changed)

@@ -249,8 +249,6 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
-github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
@@ -323,8 +321,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/aquasecurity/bolt-fixtures v0.0.0-20200903104109-d34e7f983986 h1:2a30xLN2sUZcMXl50hg+PJCIDdJgIvIbVcKqLJ/ZrtM=
github.com/aquasecurity/bolt-fixtures v0.0.0-20200903104109-d34e7f983986/go.mod h1:NT+jyeCzXk6vXR5MTkdn4z64TgGfE5HMLC8qfj5unl8=
-github.com/aquasecurity/defsec v0.94.1 h1:lk44bfUltm0f0Dw4DbO3Ka9d/bf3N8cWclSdHXMyKF4=
-github.com/aquasecurity/defsec v0.94.1/go.mod h1:wiX9BX0SOG0ZWjVIPYGPl46fyO3Gu8lJnk4rmhFR7IA=
+github.com/aquasecurity/defsec v0.94.2-0.20240119001230-c2d65f49dfeb h1:7x3aMSnQhXJLcFOCivOmNBk0zAVLKkEk5UWkrRxxHIk=
+github.com/aquasecurity/defsec v0.94.2-0.20240119001230-c2d65f49dfeb/go.mod h1:wiX9BX0SOG0ZWjVIPYGPl46fyO3Gu8lJnk4rmhFR7IA=
github.com/aquasecurity/go-dep-parser v0.0.0-20240208080026-8cc7d408bce4 h1:6qs80w4qPbPnF6GhbIifSANqfCrq90CKtSUBaw6p0z0=
github.com/aquasecurity/go-dep-parser v0.0.0-20240208080026-8cc7d408bce4/go.mod h1:P0PmelcN1ABKJrDzRbPnn6hK7RvgI+xmjiV/9uPaNnY=
github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce h1:QgBRgJvtEOBtUXilDb1MLi1p1MWoyFDXAu5DEUl5nwM=
@@ -350,8 +348,6 @@ github.com/aquasecurity/trivy-aws v0.7.1 h1:XElKZsP9Hqe2JVekQgGCIkFtgRgVlP+80wKL
github.com/aquasecurity/trivy-aws v0.7.1/go.mod h1:bJT7pzsqo9q5yi3arJSt789bAH0eDb7c+niFYMBNcMQ=
github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d h1:fjI9mkoTUAkbGqpzt9nJsO24RAdfG+ZSiLFj0G2jO8c=
github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs=
-github.com/aquasecurity/trivy-iac v0.8.0 h1:NKFhk/BTwQ0jIh4t74V8+6UIGUvPlaxO9HPlSMQi3fo=
-github.com/aquasecurity/trivy-iac v0.8.0/go.mod h1:ARiMeNqcaVWOXJmp8hmtMnNm/Jd836IOmDBUW5r4KEk=
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 h1:JVgBIuIYbwG+ekC5lUHUpGJboPYiCcxiz06RCtz8neI=
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48/go.mod h1:Ldya37FLi0e/5Cjq2T5Bty7cFkzUDwTcPeQua+2M8i8=
github.com/aquasecurity/trivy-kubernetes v0.6.3-0.20240118072219-c433b06f98e1 h1:/LsIHMQJ4SOxZeib/bvLP7S3YDTXJVIsQyS4kIIP0GQ=

internal/testutil/util.go (new file, 114 lines)

@@ -0,0 +1,114 @@
package testutil
import (
"encoding/json"
"io/fs"
"path/filepath"
"strings"
"testing"
"github.com/liamg/memoryfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/aquasecurity/defsec/pkg/scan"
)
func AssertRuleFound(t *testing.T, ruleID string, results scan.Results, message string, args ...interface{}) {
found := ruleIDInResults(ruleID, results.GetFailed())
assert.True(t, found, append([]interface{}{message}, args...)...)
for _, result := range results.GetFailed() {
if result.Rule().LongID() == ruleID {
m := result.Metadata()
meta := &m
for meta != nil {
assert.NotNil(t, meta.Range())
assert.Greater(t, meta.Range().GetStartLine(), 0)
assert.Greater(t, meta.Range().GetEndLine(), 0)
meta = meta.Parent()
}
}
}
}
func AssertRuleNotFound(t *testing.T, ruleID string, results scan.Results, message string, args ...interface{}) {
found := ruleIDInResults(ruleID, results.GetFailed())
assert.False(t, found, append([]interface{}{message}, args...)...)
}
func ruleIDInResults(ruleID string, results scan.Results) bool {
for _, res := range results {
if res.Rule().LongID() == ruleID {
return true
}
}
return false
}
func CreateFS(t *testing.T, files map[string]string) fs.FS {
memfs := memoryfs.New()
for name, content := range files {
name := strings.TrimPrefix(name, "/")
err := memfs.MkdirAll(filepath.Dir(name), 0o700)
require.NoError(t, err)
err = memfs.WriteFile(name, []byte(content), 0o644)
require.NoError(t, err)
}
return memfs
}
func AssertDefsecEqual(t *testing.T, expected, actual interface{}) {
expectedJson, err := json.MarshalIndent(expected, "", "\t")
require.NoError(t, err)
actualJson, err := json.MarshalIndent(actual, "", "\t")
require.NoError(t, err)
if expectedJson[0] == '[' {
var expectedSlice []map[string]interface{}
require.NoError(t, json.Unmarshal(expectedJson, &expectedSlice))
var actualSlice []map[string]interface{}
require.NoError(t, json.Unmarshal(actualJson, &actualSlice))
expectedSlice = purgeMetadataSlice(expectedSlice)
actualSlice = purgeMetadataSlice(actualSlice)
assert.Equal(t, expectedSlice, actualSlice, "defsec adapted and expected values do not match")
} else {
var expectedMap map[string]interface{}
require.NoError(t, json.Unmarshal(expectedJson, &expectedMap))
var actualMap map[string]interface{}
require.NoError(t, json.Unmarshal(actualJson, &actualMap))
expectedMap = purgeMetadata(expectedMap)
actualMap = purgeMetadata(actualMap)
assert.Equal(t, expectedMap, actualMap, "defsec adapted and expected values do not match")
}
}
func purgeMetadata(input map[string]interface{}) map[string]interface{} {
for k, v := range input {
if k == "metadata" || k == "Metadata" {
delete(input, k)
continue
}
if v, ok := v.(map[string]interface{}); ok {
input[k] = purgeMetadata(v)
}
if v, ok := v.([]interface{}); ok {
if len(v) > 0 {
if _, ok := v[0].(map[string]interface{}); ok {
maps := make([]map[string]interface{}, len(v))
for i := range v {
maps[i] = v[i].(map[string]interface{})
}
input[k] = purgeMetadataSlice(maps)
}
}
}
}
return input
}
func purgeMetadataSlice(input []map[string]interface{}) []map[string]interface{} {
for i := range input {
input[i] = purgeMetadata(input[i])
}
return input
}

pkg/extrafs/extrafs.go (new file, 54 lines)

@@ -0,0 +1,54 @@
package extrafs
import (
"io/fs"
"os"
"path/filepath"
)
/*
Go does not currently support symlinks in io/fs.
We work around this by wrapping the fs.FS returned by os.DirFS with our own type, which bolts on the ReadLinkFS interface.
*/
type OSFS interface {
fs.FS
fs.StatFS
}
type ReadLinkFS interface {
ResolveSymlink(name, dir string) (string, error)
}
type FS interface {
OSFS
ReadLinkFS
}
type filesystem struct {
root string
underlying OSFS
}
func OSDir(path string) FS {
return &filesystem{
root: path,
underlying: os.DirFS(path).(OSFS),
}
}
func (f *filesystem) Open(name string) (fs.File, error) {
return f.underlying.Open(name)
}
func (f *filesystem) Stat(name string) (fs.FileInfo, error) {
return f.underlying.Stat(name)
}
func (f *filesystem) ResolveSymlink(name, dir string) (string, error) {
link, err := os.Readlink(filepath.Join(f.root, dir, name))
if err == nil {
return filepath.Join(dir, link), nil
}
return name, nil
}
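For illustration only and not part of the commit, here is a minimal sketch of how the extrafs wrapper above might be consumed; the scan directory, file name, and subdirectory are hypothetical, and the import path assumes the pkg/extrafs location shown in this diff.

package main

import (
    "fmt"

    "github.com/aquasecurity/trivy/pkg/extrafs"
)

func main() {
    // Wrap a directory so that symlinks can be resolved; a plain os.DirFS cannot do this.
    fsys := extrafs.OSDir("/path/to/scan") // hypothetical scan root

    // ResolveSymlink returns the link target joined onto dir when the entry is a symlink,
    // and the original name unchanged when it is not.
    resolved, err := fsys.ResolveSymlink("main.tf", "terraform") // hypothetical file and subdirectory
    if err != nil {
        fmt.Println("resolve failed:", err)
        return
    }
    fmt.Println("resolved path:", resolved)
}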


@@ -3,9 +3,9 @@ package terraform
import (
"os"
-"github.com/aquasecurity/trivy-iac/pkg/detection"
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
"github.com/aquasecurity/trivy/pkg/fanal/analyzer/config"
+"github.com/aquasecurity/trivy/pkg/iac/detection"
"github.com/aquasecurity/trivy/pkg/misconf"
)


@@ -1,6 +1,8 @@
package analyzer
-import "github.com/aquasecurity/trivy-iac/pkg/detection"
+import (
+"github.com/aquasecurity/trivy/pkg/iac/detection"
+)
type Type string


@@ -0,0 +1,49 @@
package arm
import (
"context"
"github.com/aquasecurity/defsec/pkg/providers/azure"
"github.com/aquasecurity/defsec/pkg/state"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/appservice"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/authorization"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/compute"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/container"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/database"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/datafactory"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/datalake"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/keyvault"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/monitor"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/network"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/securitycenter"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/storage"
"github.com/aquasecurity/trivy/pkg/iac/adapters/arm/synapse"
scanner "github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
// Adapt adapts an azure arm instance
func Adapt(ctx context.Context, deployment scanner.Deployment) *state.State {
return &state.State{
Azure: adaptAzure(deployment),
}
}
func adaptAzure(deployment scanner.Deployment) azure.Azure {
return azure.Azure{
AppService: appservice.Adapt(deployment),
Authorization: authorization.Adapt(deployment),
Compute: compute.Adapt(deployment),
Container: container.Adapt(deployment),
Database: database.Adapt(deployment),
DataFactory: datafactory.Adapt(deployment),
DataLake: datalake.Adapt(deployment),
KeyVault: keyvault.Adapt(deployment),
Monitor: monitor.Adapt(deployment),
Network: network.Adapt(deployment),
SecurityCenter: securitycenter.Adapt(deployment),
Storage: storage.Adapt(deployment),
Synapse: synapse.Adapt(deployment),
}
}


@@ -0,0 +1,58 @@
package appservice
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/appservice"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) appservice.AppService {
return appservice.AppService{
Services: adaptServices(deployment),
FunctionApps: adaptFunctionApps(deployment),
}
}
func adaptFunctionApps(deployment azure.Deployment) []appservice.FunctionApp {
var functionApps []appservice.FunctionApp
for _, resource := range deployment.GetResourcesByType("Microsoft.Web/sites") {
functionApps = append(functionApps, adaptFunctionApp(resource))
}
return functionApps
}
func adaptServices(deployment azure.Deployment) []appservice.Service {
var services []appservice.Service
for _, resource := range deployment.GetResourcesByType("Microsoft.Web/sites") {
services = append(services, adaptService(resource))
}
return services
}
func adaptFunctionApp(resource azure.Resource) appservice.FunctionApp {
return appservice.FunctionApp{
Metadata: resource.Metadata,
HTTPSOnly: resource.Properties.GetMapValue("httpsOnly").AsBoolValue(false, resource.Properties.GetMetadata()),
}
}
func adaptService(resource azure.Resource) appservice.Service {
return appservice.Service{
Metadata: resource.Metadata,
EnableClientCert: resource.Properties.GetMapValue("clientCertEnabled").AsBoolValue(false, resource.Properties.GetMetadata()),
Identity: struct{ Type defsecTypes.StringValue }{
Type: resource.Properties.GetMapValue("identity").GetMapValue("type").AsStringValue("", resource.Properties.GetMetadata()),
},
Authentication: struct{ Enabled defsecTypes.BoolValue }{
Enabled: resource.Properties.GetMapValue("siteAuthSettings").GetMapValue("enabled").AsBoolValue(false, resource.Properties.GetMetadata()),
},
Site: struct {
EnableHTTP2 defsecTypes.BoolValue
MinimumTLSVersion defsecTypes.StringValue
}{
EnableHTTP2: resource.Properties.GetMapValue("httpsOnly").AsBoolValue(false, resource.Properties.GetMetadata()),
MinimumTLSVersion: resource.Properties.GetMapValue("minTlsVersion").AsStringValue("", resource.Properties.GetMetadata()),
},
}
}


@@ -0,0 +1,38 @@
package authorization
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/authorization"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) authorization.Authorization {
return authorization.Authorization{
RoleDefinitions: adaptRoleDefinitions(deployment),
}
}
func adaptRoleDefinitions(deployment azure.Deployment) (roleDefinitions []authorization.RoleDefinition) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Authorization/roleDefinitions") {
roleDefinitions = append(roleDefinitions, adaptRoleDefinition(resource))
}
return roleDefinitions
}
func adaptRoleDefinition(resource azure.Resource) authorization.RoleDefinition {
return authorization.RoleDefinition{
Metadata: resource.Metadata,
Permissions: adaptPermissions(resource),
AssignableScopes: resource.Properties.GetMapValue("assignableScopes").AsStringValuesList(""),
}
}
func adaptPermissions(resource azure.Resource) (permissions []authorization.Permission) {
for _, permission := range resource.Properties.GetMapValue("permissions").AsList() {
permissions = append(permissions, authorization.Permission{
Metadata: resource.Metadata,
Actions: permission.GetMapValue("actions").AsStringValuesList(""),
})
}
return permissions
}


@@ -0,0 +1,85 @@
package compute
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/compute"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) compute.Compute {
return compute.Compute{
LinuxVirtualMachines: adaptLinuxVirtualMachines(deployment),
WindowsVirtualMachines: adaptWindowsVirtualMachines(deployment),
ManagedDisks: adaptManagedDisks(deployment),
}
}
func adaptManagedDisks(deployment azure.Deployment) (managedDisks []compute.ManagedDisk) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/disks") {
managedDisks = append(managedDisks, adaptManagedDisk(resource))
}
return managedDisks
}
func adaptManagedDisk(resource azure.Resource) compute.ManagedDisk {
hasEncryption := resource.Properties.HasKey("encryption")
return compute.ManagedDisk{
Metadata: resource.Metadata,
Encryption: compute.Encryption{
Metadata: resource.Metadata,
Enabled: defsecTypes.Bool(hasEncryption, resource.Metadata),
},
}
}
func adaptWindowsVirtualMachines(deployment azure.Deployment) (windowsVirtualMachines []compute.WindowsVirtualMachine) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/virtualMachines") {
if resource.Properties.GetMapValue("osProfile").GetMapValue("windowsConfiguration").AsMap() != nil {
windowsVirtualMachines = append(windowsVirtualMachines, adaptWindowsVirtualMachine(resource))
}
}
return windowsVirtualMachines
}
func adaptWindowsVirtualMachine(resource azure.Resource) compute.WindowsVirtualMachine {
return compute.WindowsVirtualMachine{
Metadata: resource.Metadata,
VirtualMachine: compute.VirtualMachine{
Metadata: resource.Metadata,
CustomData: resource.Properties.GetMapValue("osProfile").
GetMapValue("customData").AsStringValue("", resource.Metadata),
},
}
}
func adaptLinuxVirtualMachines(deployment azure.Deployment) (linuxVirtualMachines []compute.LinuxVirtualMachine) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/virtualMachines") {
if resource.Properties.GetMapValue("osProfile").GetMapValue("linuxConfiguration").AsMap() != nil {
linuxVirtualMachines = append(linuxVirtualMachines, adaptLinuxVirtualMachine(resource))
}
}
return linuxVirtualMachines
}
func adaptLinuxVirtualMachine(resource azure.Resource) compute.LinuxVirtualMachine {
return compute.LinuxVirtualMachine{
Metadata: resource.Metadata,
VirtualMachine: compute.VirtualMachine{
Metadata: resource.Metadata,
CustomData: resource.Properties.GetMapValue("osProfile").
GetMapValue("customData").AsStringValue("", resource.Metadata),
},
OSProfileLinuxConfig: compute.OSProfileLinuxConfig{
Metadata: resource.Metadata,
DisablePasswordAuthentication: resource.Properties.GetMapValue("osProfile").
GetMapValue("linuxConfiguration").
GetMapValue("disablePasswordAuthentication").AsBoolValue(false, resource.Metadata),
},
}
}


@@ -0,0 +1,59 @@
package compute
import (
"testing"
"github.com/aquasecurity/defsec/pkg/types"
azure2 "github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_AdaptLinuxVM(t *testing.T) {
input := azure2.Deployment{
Resources: []azure2.Resource{
{
Type: azure2.NewValue("Microsoft.Compute/virtualMachines", types.NewTestMetadata()),
Properties: azure2.NewValue(map[string]azure2.Value{
"osProfile": azure2.NewValue(map[string]azure2.Value{
"linuxConfiguration": azure2.NewValue(map[string]azure2.Value{
"disablePasswordAuthentication": azure2.NewValue(true, types.NewTestMetadata()),
}, types.NewTestMetadata()),
}, types.NewTestMetadata()),
}, types.NewTestMetadata()),
},
},
}
output := Adapt(input)
require.Len(t, output.LinuxVirtualMachines, 1)
require.Len(t, output.WindowsVirtualMachines, 0)
linuxVM := output.LinuxVirtualMachines[0]
assert.True(t, linuxVM.OSProfileLinuxConfig.DisablePasswordAuthentication.IsTrue())
}
func Test_AdaptWindowsVM(t *testing.T) {
input := azure2.Deployment{
Resources: []azure2.Resource{
{
Type: azure2.NewValue("Microsoft.Compute/virtualMachines", types.NewTestMetadata()),
Properties: azure2.NewValue(map[string]azure2.Value{
"osProfile": azure2.NewValue(map[string]azure2.Value{
"windowsConfiguration": azure2.NewValue(map[string]azure2.Value{}, types.NewTestMetadata()),
}, types.NewTestMetadata()),
}, types.NewTestMetadata()),
},
},
}
output := Adapt(input)
require.Len(t, output.LinuxVirtualMachines, 0)
require.Len(t, output.WindowsVirtualMachines, 1)
}


@@ -0,0 +1,17 @@
package container
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/container"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) container.Container {
return container.Container{
KubernetesClusters: adaptKubernetesClusters(deployment),
}
}
func adaptKubernetesClusters(deployment azure.Deployment) []container.KubernetesCluster {
return nil
}


@@ -0,0 +1,35 @@
package database
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/database"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) database.Database {
return database.Database{
MSSQLServers: adaptMSSQLServers(deployment),
MariaDBServers: adaptMariaDBServers(deployment),
MySQLServers: adaptMySQLServers(deployment),
PostgreSQLServers: adaptPostgreSQLServers(deployment),
}
}
func adaptMySQLServers(deployment azure.Deployment) (mysqlDbServers []database.MySQLServer) {
for _, resource := range deployment.GetResourcesByType("Microsoft.DBforMySQL/servers") {
mysqlDbServers = append(mysqlDbServers, adaptMySQLServer(resource, deployment))
}
return mysqlDbServers
}
func adaptMySQLServer(resource azure.Resource, deployment azure.Deployment) database.MySQLServer {
return database.MySQLServer{
Metadata: resource.Metadata,
Server: database.Server{
Metadata: resource.Metadata,
EnableSSLEnforcement: resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
MinimumTLSVersion: resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
FirewallRules: addFirewallRule(resource),
},
}
}


@@ -0,0 +1,18 @@
package database
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/database"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func addFirewallRule(resource azure.Resource) []database.FirewallRule {
var rules []database.FirewallRule
for _, rule := range resource.Properties.GetMapValue("firewallRules").AsMap() {
rules = append(rules, database.FirewallRule{
Metadata: rule.Metadata,
StartIP: rule.GetMapValue("startIpAddress").AsStringValue("", rule.Metadata),
EndIP: rule.GetMapValue("endIpAddress").AsStringValue("", rule.Metadata),
})
}
return rules
}


@@ -0,0 +1,27 @@
package database
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/database"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func adaptMariaDBServers(deployment azure.Deployment) (mariaDbServers []database.MariaDBServer) {
for _, resource := range deployment.GetResourcesByType("Microsoft.DBforMariaDB/servers") {
mariaDbServers = append(mariaDbServers, adaptMariaDBServer(resource, deployment))
}
return mariaDbServers
}
func adaptMariaDBServer(resource azure.Resource, deployment azure.Deployment) database.MariaDBServer {
return database.MariaDBServer{
Metadata: resource.Metadata,
Server: database.Server{
Metadata: resource.Metadata,
EnableSSLEnforcement: resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
MinimumTLSVersion: resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
FirewallRules: addFirewallRule(resource),
},
}
}


@@ -0,0 +1,61 @@
package database
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/database"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
azure2 "github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func adaptMSSQLServers(deployment azure2.Deployment) (msSQlServers []database.MSSQLServer) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Sql/servers") {
msSQlServers = append(msSQlServers, adaptMSSQLServer(resource, deployment))
}
return msSQlServers
}
func adaptMSSQLServer(resource azure2.Resource, deployment azure2.Deployment) database.MSSQLServer {
return database.MSSQLServer{
Metadata: resource.Metadata,
Server: database.Server{
Metadata: resource.Metadata,
EnableSSLEnforcement: resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
MinimumTLSVersion: resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
FirewallRules: addFirewallRule(resource),
},
ExtendedAuditingPolicies: adaptExtendedAuditingPolicies(resource, deployment),
SecurityAlertPolicies: adaptSecurityAlertPolicies(resource, deployment),
}
}
func adaptExtendedAuditingPolicies(resource azure2.Resource, deployment azure2.Deployment) (policies []database.ExtendedAuditingPolicy) {
for _, policy := range deployment.GetResourcesByType("Microsoft.Sql/servers/extendedAuditingSettings") {
policies = append(policies, database.ExtendedAuditingPolicy{
Metadata: policy.Metadata,
RetentionInDays: policy.Properties.GetMapValue("retentionDays").AsIntValue(0, policy.Metadata),
})
}
return policies
}
func adaptSecurityAlertPolicies(resource azure2.Resource, deployment azure2.Deployment) (policies []database.SecurityAlertPolicy) {
for _, policy := range deployment.GetResourcesByType("Microsoft.Sql/servers/securityAlertPolicies") {
policies = append(policies, database.SecurityAlertPolicy{
Metadata: policy.Metadata,
EmailAddresses: adaptStringList(policy.Properties.GetMapValue("emailAddresses")),
DisabledAlerts: adaptStringList(policy.Properties.GetMapValue("disabledAlerts")),
EmailAccountAdmins: policy.Properties.GetMapValue("emailAccountAdmins").AsBoolValue(false, policy.Metadata),
})
}
return policies
}
func adaptStringList(value azure2.Value) []defsecTypes.StringValue {
var list []defsecTypes.StringValue
for _, v := range value.AsList() {
list = append(list, v.AsStringValue("", value.Metadata))
}
return list
}


@@ -0,0 +1,64 @@
package database
import (
"fmt"
"strings"
"github.com/aquasecurity/defsec/pkg/providers/azure/database"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func adaptPostgreSQLServers(deployment azure.Deployment) (databases []database.PostgreSQLServer) {
for _, resource := range deployment.GetResourcesByType("Microsoft.DBforPostgreSQL/servers") {
databases = append(databases, adaptPostgreSQLServer(resource, deployment))
}
return databases
}
func adaptPostgreSQLServer(resource azure.Resource, deployment azure.Deployment) database.PostgreSQLServer {
return database.PostgreSQLServer{
Metadata: resource.Metadata,
Server: database.Server{
Metadata: resource.Metadata,
EnableSSLEnforcement: resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
MinimumTLSVersion: resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
FirewallRules: addFirewallRule(resource),
},
Config: adaptPostgreSQLConfiguration(resource, deployment),
}
}
func adaptPostgreSQLConfiguration(resource azure.Resource, deployment azure.Deployment) database.PostgresSQLConfig {
parent := fmt.Sprintf("%s/", resource.Name.AsString())
config := database.PostgresSQLConfig{
Metadata: resource.Metadata,
LogCheckpoints: defsecTypes.BoolDefault(false, resource.Metadata),
ConnectionThrottling: defsecTypes.BoolDefault(false, resource.Metadata),
LogConnections: defsecTypes.BoolDefault(false, resource.Metadata),
}
for _, configuration := range deployment.GetResourcesByType("Microsoft.DBforPostgreSQL/servers/configurations") {
if strings.HasPrefix(configuration.Name.AsString(), parent) {
val := configuration.Properties.GetMapValue("value")
if strings.HasSuffix(configuration.Name.AsString(), "log_checkpoints") {
config.LogCheckpoints = val.AsBoolValue(false, configuration.Metadata)
continue
}
if strings.HasSuffix(configuration.Name.AsString(), "log_connections") {
config.LogConnections = val.AsBoolValue(false, configuration.Metadata)
continue
}
if strings.HasSuffix(configuration.Name.AsString(), "connection_throttling") {
config.ConnectionThrottling = val.AsBoolValue(false, configuration.Metadata)
continue
}
}
}
return config
}


@@ -0,0 +1,27 @@
package datafactory
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/datafactory"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) datafactory.DataFactory {
return datafactory.DataFactory{
DataFactories: adaptDataFactories(deployment),
}
}
func adaptDataFactories(deployment azure.Deployment) (factories []datafactory.Factory) {
for _, resource := range deployment.GetResourcesByType("Microsoft.DataFactory/factories") {
factories = append(factories, adaptDataFactory(resource))
}
return factories
}
func adaptDataFactory(resource azure.Resource) datafactory.Factory {
return datafactory.Factory{
Metadata: resource.Metadata,
EnablePublicNetwork: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(true, resource.Metadata),
}
}


@@ -0,0 +1,28 @@
package datalake
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/datalake"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) datalake.DataLake {
return datalake.DataLake{
Stores: adaptStores(deployment),
}
}
func adaptStores(deployment azure.Deployment) (stores []datalake.Store) {
for _, resource := range deployment.GetResourcesByType("Microsoft.DataLakeStore/accounts") {
stores = append(stores, adaptStore(resource))
}
return stores
}
func adaptStore(resource azure.Resource) datalake.Store {
return datalake.Store{
Metadata: resource.Metadata,
EnableEncryption: resource.Properties.GetMapValue("encryptionState").AsBoolValue(false, resource.Metadata),
}
}


@@ -0,0 +1,64 @@
package keyvault
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/keyvault"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) keyvault.KeyVault {
return keyvault.KeyVault{
Vaults: adaptVaults(deployment),
}
}
func adaptVaults(deployment azure.Deployment) (vaults []keyvault.Vault) {
for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults") {
vaults = append(vaults, adaptVault(resource, deployment))
}
return vaults
}
func adaptVault(resource azure.Resource, deployment azure.Deployment) keyvault.Vault {
return keyvault.Vault{
Metadata: resource.Metadata,
Secrets: adaptSecrets(resource, deployment),
Keys: adaptKeys(resource, deployment),
EnablePurgeProtection: resource.Properties.GetMapValue("enablePurgeProtection").AsBoolValue(false, resource.Metadata),
SoftDeleteRetentionDays: resource.Properties.GetMapValue("softDeleteRetentionInDays").AsIntValue(7, resource.Metadata),
NetworkACLs: keyvault.NetworkACLs{
Metadata: resource.Metadata,
DefaultAction: resource.Properties.GetMapValue("properties").GetMapValue("networkAcls").GetMapValue("defaultAction").AsStringValue("", resource.Metadata),
},
}
}
func adaptKeys(resource azure.Resource, deployment azure.Deployment) (keys []keyvault.Key) {
for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults/keys") {
keys = append(keys, adaptKey(resource))
}
return keys
}
func adaptKey(resource azure.Resource) keyvault.Key {
return keyvault.Key{
Metadata: resource.Metadata,
ExpiryDate: resource.Properties.GetMapValue("attributes").GetMapValue("exp").AsTimeValue(resource.Metadata),
}
}
func adaptSecrets(resource azure.Resource, deployment azure.Deployment) (secrets []keyvault.Secret) {
for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults/secrets") {
secrets = append(secrets, adaptSecret(resource))
}
return secrets
}
func adaptSecret(resource azure.Resource) keyvault.Secret {
return keyvault.Secret{
Metadata: resource.Metadata,
ContentType: resource.Properties.GetMapValue("contentType").AsStringValue("", resource.Metadata),
ExpiryDate: resource.Properties.GetMapValue("attributes").GetMapValue("exp").AsTimeValue(resource.Metadata),
}
}


@@ -0,0 +1,45 @@
package monitor
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/monitor"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) monitor.Monitor {
return monitor.Monitor{
LogProfiles: adaptLogProfiles(deployment),
}
}
func adaptLogProfiles(deployment azure.Deployment) (logProfiles []monitor.LogProfile) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Insights/logProfiles") {
logProfiles = append(logProfiles, adaptLogProfile(resource))
}
return logProfiles
}
func adaptLogProfile(resource azure.Resource) monitor.LogProfile {
categories := resource.Properties.GetMapValue("categories").AsList()
var categoriesList []types.StringValue
for _, category := range categories {
categoriesList = append(categoriesList, category.AsStringValue("", category.Metadata))
}
locations := resource.Properties.GetMapValue("locations").AsList()
var locationsList []types.StringValue
for _, location := range locations {
locationsList = append(locationsList, location.AsStringValue("", location.Metadata))
}
return monitor.LogProfile{
Metadata: resource.Metadata,
RetentionPolicy: monitor.RetentionPolicy{
Metadata: resource.Metadata,
Enabled: resource.Properties.GetMapValue("retentionPolicy").GetMapValue("enabled").AsBoolValue(false, resource.Metadata),
Days: resource.Properties.GetMapValue("retentionPolicy").GetMapValue("days").AsIntValue(0, resource.Metadata),
},
Categories: categoriesList,
Locations: locationsList,
}
}


@@ -0,0 +1,126 @@
package network
import (
"strconv"
"strings"
"github.com/aquasecurity/defsec/pkg/providers/azure/network"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) network.Network {
return network.Network{
SecurityGroups: adaptSecurityGroups(deployment),
NetworkWatcherFlowLogs: adaptNetworkWatcherFlowLogs(deployment),
}
}
func adaptSecurityGroups(deployment azure.Deployment) (sgs []network.SecurityGroup) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkSecurityGroups") {
sgs = append(sgs, adaptSecurityGroup(resource, deployment))
}
return sgs
}
func adaptSecurityGroup(resource azure.Resource, deployment azure.Deployment) network.SecurityGroup {
return network.SecurityGroup{
Metadata: resource.Metadata,
Rules: adaptSecurityGroupRules(resource, deployment),
}
}
func adaptSecurityGroupRules(resource azure.Resource, deployment azure.Deployment) (rules []network.SecurityGroupRule) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkSecurityGroups/securityRules") {
rules = append(rules, adaptSecurityGroupRule(resource))
}
return rules
}
func adaptSecurityGroupRule(resource azure.Resource) network.SecurityGroupRule {
sourceAddressPrefixes := resource.Properties.GetMapValue("sourceAddressPrefixes").AsStringValuesList("")
sourceAddressPrefixes = append(sourceAddressPrefixes, resource.Properties.GetMapValue("sourceAddressPrefix").AsStringValue("", resource.Metadata))
var sourcePortRanges []network.PortRange
for _, portRange := range resource.Properties.GetMapValue("sourcePortRanges").AsList() {
sourcePortRanges = append(sourcePortRanges, expandRange(portRange.AsString(), resource.Metadata))
}
sourcePortRanges = append(sourcePortRanges, expandRange(resource.Properties.GetMapValue("sourcePortRange").AsString(), resource.Metadata))
destinationAddressPrefixes := resource.Properties.GetMapValue("destinationAddressPrefixes").AsStringValuesList("")
destinationAddressPrefixes = append(destinationAddressPrefixes, resource.Properties.GetMapValue("destinationAddressPrefix").AsStringValue("", resource.Metadata))
var destinationPortRanges []network.PortRange
for _, portRange := range resource.Properties.GetMapValue("destinationPortRanges").AsList() {
destinationPortRanges = append(destinationPortRanges, expandRange(portRange.AsString(), resource.Metadata))
}
destinationPortRanges = append(destinationPortRanges, expandRange(resource.Properties.GetMapValue("destinationPortRange").AsString(), resource.Metadata))
allow := defsecTypes.BoolDefault(false, resource.Metadata)
if resource.Properties.GetMapValue("access").AsString() == "Allow" {
allow = defsecTypes.Bool(true, resource.Metadata)
}
outbound := defsecTypes.BoolDefault(false, resource.Metadata)
if resource.Properties.GetMapValue("direction").AsString() == "Outbound" {
outbound = defsecTypes.Bool(true, resource.Metadata)
}
return network.SecurityGroupRule{
Metadata: resource.Metadata,
Outbound: outbound,
Allow: allow,
SourceAddresses: sourceAddressPrefixes,
SourcePorts: sourcePortRanges,
DestinationAddresses: destinationAddressPrefixes,
DestinationPorts: destinationPortRanges,
Protocol: resource.Properties.GetMapValue("protocol").AsStringValue("", resource.Metadata),
}
}
func adaptNetworkWatcherFlowLogs(deployment azure.Deployment) (flowLogs []network.NetworkWatcherFlowLog) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkWatchers/flowLogs") {
flowLogs = append(flowLogs, adaptNetworkWatcherFlowLog(resource))
}
return flowLogs
}
func adaptNetworkWatcherFlowLog(resource azure.Resource) network.NetworkWatcherFlowLog {
return network.NetworkWatcherFlowLog{
Metadata: resource.Metadata,
RetentionPolicy: network.RetentionPolicy{
Metadata: resource.Metadata,
Enabled: resource.Properties.GetMapValue("retentionPolicy").GetMapValue("enabled").AsBoolValue(false, resource.Metadata),
Days: resource.Properties.GetMapValue("retentionPolicy").GetMapValue("days").AsIntValue(0, resource.Metadata),
},
}
}
func expandRange(r string, m defsecTypes.Metadata) network.PortRange {
start := 0
end := 65535
switch {
case r == "*":
case strings.Contains(r, "-"):
if parts := strings.Split(r, "-"); len(parts) == 2 {
if p1, err := strconv.ParseInt(parts[0], 10, 32); err == nil {
start = int(p1)
}
if p2, err := strconv.ParseInt(parts[1], 10, 32); err == nil {
end = int(p2)
}
}
default:
if val, err := strconv.ParseInt(r, 10, 32); err == nil {
start = int(val)
end = int(val)
}
}
return network.PortRange{
Metadata: m,
Start: start,
End: end,
}
}


@@ -0,0 +1,43 @@
package securitycenter
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/securitycenter"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) securitycenter.SecurityCenter {
return securitycenter.SecurityCenter{
Contacts: adaptContacts(deployment),
Subscriptions: adaptSubscriptions(deployment),
}
}
func adaptContacts(deployment azure.Deployment) (contacts []securitycenter.Contact) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Security/securityContacts") {
contacts = append(contacts, adaptContact(resource))
}
return contacts
}
func adaptContact(resource azure.Resource) securitycenter.Contact {
return securitycenter.Contact{
Metadata: resource.Metadata,
EnableAlertNotifications: resource.Properties.GetMapValue("email").AsBoolValue(false, resource.Metadata),
Phone: resource.Properties.GetMapValue("phone").AsStringValue("", resource.Metadata),
}
}
func adaptSubscriptions(deployment azure.Deployment) (subscriptions []securitycenter.SubscriptionPricing) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Security/pricings") {
subscriptions = append(subscriptions, adaptSubscription(resource))
}
return subscriptions
}
func adaptSubscription(resource azure.Resource) securitycenter.SubscriptionPricing {
return securitycenter.SubscriptionPricing{
Metadata: resource.Metadata,
Tier: resource.Properties.GetMapValue("pricingTier").AsStringValue("Free", resource.Metadata),
}
}


@@ -0,0 +1,68 @@
package storage
import (
"strings"
"github.com/aquasecurity/defsec/pkg/providers/azure/storage"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) storage.Storage {
return storage.Storage{
Accounts: adaptAccounts(deployment),
}
}
func adaptAccounts(deployment azure.Deployment) []storage.Account {
var accounts []storage.Account
for _, resource := range deployment.GetResourcesByType("Microsoft.Storage/storageAccounts") {
var networkRules []storage.NetworkRule
for _, acl := range resource.Properties.GetMapValue("networkAcls").AsList() {
var bypasses []types.StringValue
bypassProp := acl.GetMapValue("bypass")
for _, bypass := range strings.Split(bypassProp.AsString(), ",") {
bypasses = append(bypasses, types.String(bypass, bypassProp.GetMetadata()))
}
networkRules = append(networkRules, storage.NetworkRule{
Metadata: acl.GetMetadata(),
Bypass: bypasses,
AllowByDefault: types.Bool(acl.GetMapValue("defaultAction").EqualTo("Allow"), acl.GetMetadata()),
})
}
var queues []storage.Queue
for _, queueResource := range resource.GetResourcesByType("queueServices/queues") {
queues = append(queues, storage.Queue{
Metadata: queueResource.Metadata,
Name: queueResource.Name.AsStringValue("", queueResource.Metadata),
})
}
var containers []storage.Container
for _, containerResource := range resource.GetResourcesByType("containerServices/containers") {
containers = append(containers, storage.Container{
Metadata: containerResource.Metadata,
PublicAccess: containerResource.Properties.GetMapValue("publicAccess").AsStringValue("None", containerResource.Metadata),
})
}
account := storage.Account{
Metadata: resource.Metadata,
NetworkRules: networkRules,
EnforceHTTPS: resource.Properties.GetMapValue("supportsHttpsTrafficOnly").AsBoolValue(false, resource.Properties.GetMetadata()),
Containers: containers,
QueueProperties: storage.QueueProperties{
Metadata: resource.Properties.GetMetadata(),
EnableLogging: types.BoolDefault(false, resource.Properties.GetMetadata()),
},
MinimumTLSVersion: resource.Properties.GetMapValue("minimumTlsVersion").AsStringValue("TLS1_0", resource.Properties.GetMetadata()),
Queues: queues,
}
accounts = append(accounts, account)
}
return accounts
}


@@ -0,0 +1,58 @@
package storage
import (
"testing"
azure2 "github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
"github.com/stretchr/testify/assert"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/stretchr/testify/require"
)
func Test_AdaptStorageDefaults(t *testing.T) {
input := azure2.Deployment{
Resources: []azure2.Resource{
{
Type: azure2.NewValue("Microsoft.Storage/storageAccounts", types.NewTestMetadata()),
Properties: azure2.NewValue(map[string]azure2.Value{}, types.NewTestMetadata()),
},
},
}
output := Adapt(input)
require.Len(t, output.Accounts, 1)
account := output.Accounts[0]
assert.Equal(t, "TLS1_0", account.MinimumTLSVersion.Value())
assert.Equal(t, false, account.EnforceHTTPS.Value())
}
func Test_AdaptStorage(t *testing.T) {
input := azure2.Deployment{
Resources: []azure2.Resource{
{
Type: azure2.NewValue("Microsoft.Storage/storageAccounts", types.NewTestMetadata()),
Name: azure2.Value{},
Properties: azure2.NewValue(map[string]azure2.Value{
"minimumTlsVersion": azure2.NewValue("TLS1_2", types.NewTestMetadata()),
"supportsHttpsTrafficOnly": azure2.NewValue(true, types.NewTestMetadata()),
}, types.NewTestMetadata()),
},
},
}
output := Adapt(input)
require.Len(t, output.Accounts, 1)
account := output.Accounts[0]
assert.Equal(t, "TLS1_2", account.MinimumTLSVersion.Value())
assert.Equal(t, true, account.EnforceHTTPS.Value())
}


@@ -0,0 +1,34 @@
package synapse
import (
"github.com/aquasecurity/defsec/pkg/providers/azure/synapse"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/azure"
)
func Adapt(deployment azure.Deployment) synapse.Synapse {
return synapse.Synapse{
Workspaces: adaptWorkspaces(deployment),
}
}
func adaptWorkspaces(deployment azure.Deployment) (workspaces []synapse.Workspace) {
for _, resource := range deployment.GetResourcesByType("Microsoft.Synapse/workspaces") {
workspaces = append(workspaces, adaptWorkspace(resource))
}
return workspaces
}
func adaptWorkspace(resource azure.Resource) synapse.Workspace {
managedVirtualNetwork := resource.Properties.GetMapValue("managedVirtualNetwork").AsString()
enableManagedVirtualNetwork := types.BoolDefault(false, resource.Metadata)
if managedVirtualNetwork == "default" {
enableManagedVirtualNetwork = types.Bool(true, resource.Metadata)
}
return synapse.Workspace{
Metadata: resource.Metadata,
EnableManagedVirtualNetwork: enableManagedVirtualNetwork,
}
}


@@ -0,0 +1,14 @@
package cloudformation
import (
"github.com/aquasecurity/defsec/pkg/state"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts the Cloudformation instance
func Adapt(cfFile parser.FileContext) *state.State {
return &state.State{
AWS: aws.Adapt(cfFile),
}
}


@@ -0,0 +1,13 @@
package accessanalyzer
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/accessanalyzer"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an AccessAnalyzer instance
func Adapt(cfFile parser.FileContext) accessanalyzer.AccessAnalyzer {
return accessanalyzer.AccessAnalyzer{
Analyzers: getAccessAnalyzer(cfFile),
}
}


@@ -0,0 +1,24 @@
package accessanalyzer
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/accessanalyzer"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getAccessAnalyzer(ctx parser.FileContext) (analyzers []accessanalyzer.Analyzer) {
analyzersList := ctx.GetResourcesByType("AWS::AccessAnalyzer::Analyzer")
for _, r := range analyzersList {
aa := accessanalyzer.Analyzer{
Metadata: r.Metadata(),
Name: r.GetStringProperty("AnalyzerName"),
ARN: r.StringDefault(""),
Active: types.BoolDefault(false, r.Metadata()),
}
analyzers = append(analyzers, aa)
}
return analyzers
}


@@ -0,0 +1,74 @@
package aws
import (
"github.com/aquasecurity/defsec/pkg/providers/aws"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/apigateway"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/athena"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/cloudfront"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/cloudtrail"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/cloudwatch"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/codebuild"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/config"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/documentdb"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/dynamodb"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/ec2"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/ecr"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/ecs"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/efs"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/eks"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/elasticache"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/elasticsearch"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/elb"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/iam"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/kinesis"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/lambda"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/mq"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/msk"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/neptune"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/rds"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/redshift"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/s3"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/sam"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/sns"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/sqs"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/ssm"
"github.com/aquasecurity/trivy/pkg/iac/adapters/cloudformation/aws/workspaces"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a Cloudformation AWS instance
func Adapt(cfFile parser.FileContext) aws.AWS {
return aws.AWS{
APIGateway: apigateway.Adapt(cfFile),
Athena: athena.Adapt(cfFile),
Cloudfront: cloudfront.Adapt(cfFile),
CloudTrail: cloudtrail.Adapt(cfFile),
CloudWatch: cloudwatch.Adapt(cfFile),
CodeBuild: codebuild.Adapt(cfFile),
Config: config.Adapt(cfFile),
DocumentDB: documentdb.Adapt(cfFile),
DynamoDB: dynamodb.Adapt(cfFile),
EC2: ec2.Adapt(cfFile),
ECR: ecr.Adapt(cfFile),
ECS: ecs.Adapt(cfFile),
EFS: efs.Adapt(cfFile),
IAM: iam.Adapt(cfFile),
EKS: eks.Adapt(cfFile),
ElastiCache: elasticache.Adapt(cfFile),
Elasticsearch: elasticsearch.Adapt(cfFile),
ELB: elb.Adapt(cfFile),
MSK: msk.Adapt(cfFile),
MQ: mq.Adapt(cfFile),
Kinesis: kinesis.Adapt(cfFile),
Lambda: lambda.Adapt(cfFile),
Neptune: neptune.Adapt(cfFile),
RDS: rds.Adapt(cfFile),
Redshift: redshift.Adapt(cfFile),
S3: s3.Adapt(cfFile),
SAM: sam.Adapt(cfFile),
SNS: sns.Adapt(cfFile),
SQS: sqs.Adapt(cfFile),
SSM: ssm.Adapt(cfFile),
WorkSpaces: workspaces.Adapt(cfFile),
}
}


@@ -0,0 +1,21 @@
package apigateway
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/apigateway"
v1 "github.com/aquasecurity/defsec/pkg/providers/aws/apigateway/v1"
v2 "github.com/aquasecurity/defsec/pkg/providers/aws/apigateway/v2"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an APIGateway instance
func Adapt(cfFile parser.FileContext) apigateway.APIGateway {
return apigateway.APIGateway{
V1: v1.APIGateway{
APIs: nil,
DomainNames: nil,
},
V2: v2.APIGateway{
APIs: getApis(cfFile),
},
}
}


@@ -0,0 +1,68 @@
package apigateway
import (
v2 "github.com/aquasecurity/defsec/pkg/providers/aws/apigateway/v2"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getApis(cfFile parser2.FileContext) (apis []v2.API) {
apiResources := cfFile.GetResourcesByType("AWS::ApiGatewayV2::Api")
for _, apiRes := range apiResources {
api := v2.API{
Metadata: apiRes.Metadata(),
Name: types.StringDefault("", apiRes.Metadata()),
ProtocolType: types.StringDefault("", apiRes.Metadata()),
Stages: getStages(apiRes.ID(), cfFile),
}
apis = append(apis, api)
}
return apis
}
func getStages(apiId string, cfFile parser2.FileContext) []v2.Stage {
var apiStages []v2.Stage
stageResources := cfFile.GetResourcesByType("AWS::ApiGatewayV2::Stage")
for _, r := range stageResources {
stageApiId := r.GetStringProperty("ApiId")
if stageApiId.Value() != apiId {
continue
}
s := v2.Stage{
Metadata: r.Metadata(),
Name: r.GetStringProperty("StageName"),
AccessLogging: getAccessLogging(r),
}
apiStages = append(apiStages, s)
}
return apiStages
}
func getAccessLogging(r *parser2.Resource) v2.AccessLogging {
loggingProp := r.GetProperty("AccessLogSettings")
if loggingProp.IsNil() {
return v2.AccessLogging{
Metadata: r.Metadata(),
CloudwatchLogGroupARN: types.StringDefault("", r.Metadata()),
}
}
destinationProp := r.GetProperty("AccessLogSettings.DestinationArn")
if destinationProp.IsNil() {
return v2.AccessLogging{
Metadata: loggingProp.Metadata(),
CloudwatchLogGroupARN: types.StringDefault("", r.Metadata()),
}
}
return v2.AccessLogging{
Metadata: destinationProp.Metadata(),
CloudwatchLogGroupARN: destinationProp.AsStringValue(),
}
}


@@ -0,0 +1,14 @@
package athena
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/athena"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an Athena instance
func Adapt(cfFile parser.FileContext) athena.Athena {
return athena.Athena{
Databases: nil,
Workgroups: getWorkGroups(cfFile),
}
}


@@ -0,0 +1,30 @@
package athena
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/athena"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getWorkGroups(cfFile parser.FileContext) []athena.Workgroup {
var workgroups []athena.Workgroup
workgroupResources := cfFile.GetResourcesByType("AWS::Athena::WorkGroup")
for _, r := range workgroupResources {
wg := athena.Workgroup{
Metadata: r.Metadata(),
Name: r.GetStringProperty("Name"),
Encryption: athena.EncryptionConfiguration{
Metadata: r.Metadata(),
Type: r.GetStringProperty("WorkGroupConfiguration.ResultConfiguration.EncryptionConfiguration.EncryptionOption"),
},
EnforceConfiguration: r.GetBoolProperty("WorkGroupConfiguration.EnforceWorkGroupConfiguration"),
}
workgroups = append(workgroups, wg)
}
return workgroups
}

View File

@@ -0,0 +1,13 @@
package cloudfront
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudfront"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a CloudFront instance
func Adapt(cfFile parser.FileContext) cloudfront.Cloudfront {
return cloudfront.Cloudfront{
Distributions: getDistributions(cfFile),
}
}

View File

@@ -0,0 +1,55 @@
package cloudfront
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudfront"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getDistributions(ctx parser2.FileContext) (distributions []cloudfront.Distribution) {
distributionResources := ctx.GetResourcesByType("AWS::CloudFront::Distribution")
for _, r := range distributionResources {
distribution := cloudfront.Distribution{
Metadata: r.Metadata(),
WAFID: r.GetStringProperty("DistributionConfig.WebACLId"),
Logging: cloudfront.Logging{
Metadata: r.Metadata(),
Bucket: r.GetStringProperty("DistributionConfig.Logging.Bucket"),
},
DefaultCacheBehaviour: getDefaultCacheBehaviour(r),
OrdererCacheBehaviours: nil,
ViewerCertificate: cloudfront.ViewerCertificate{
Metadata: r.Metadata(),
MinimumProtocolVersion: r.GetStringProperty("DistributionConfig.ViewerCertificate.MinimumProtocolVersion"),
},
}
distributions = append(distributions, distribution)
}
return distributions
}
func getDefaultCacheBehaviour(r *parser2.Resource) cloudfront.CacheBehaviour {
defaultCache := r.GetProperty("DistributionConfig.DefaultCacheBehavior")
if defaultCache.IsNil() {
return cloudfront.CacheBehaviour{
Metadata: r.Metadata(),
ViewerProtocolPolicy: types.StringDefault("allow-all", r.Metadata()),
}
}
protoProp := r.GetProperty("DistributionConfig.DefaultCacheBehavior.ViewerProtocolPolicy")
if protoProp.IsNotString() {
return cloudfront.CacheBehaviour{
Metadata: r.Metadata(),
ViewerProtocolPolicy: types.StringDefault("allow-all", r.Metadata()),
}
}
return cloudfront.CacheBehaviour{
Metadata: r.Metadata(),
ViewerProtocolPolicy: protoProp.AsStringValue(),
}
}

View File

@@ -0,0 +1,13 @@
package cloudtrail
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudtrail"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a CloudTrail instance
func Adapt(cfFile parser.FileContext) cloudtrail.CloudTrail {
return cloudtrail.CloudTrail{
Trails: getCloudTrails(cfFile),
}
}

View File

@@ -0,0 +1,27 @@
package cloudtrail
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudtrail"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getCloudTrails(ctx parser.FileContext) (trails []cloudtrail.Trail) {
cloudtrailResources := ctx.GetResourcesByType("AWS::CloudTrail::Trail")
for _, r := range cloudtrailResources {
ct := cloudtrail.Trail{
Metadata: r.Metadata(),
Name: r.GetStringProperty("TrailName"),
EnableLogFileValidation: r.GetBoolProperty("EnableLogFileValidation"),
IsMultiRegion: r.GetBoolProperty("IsMultiRegionTrail"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
CloudWatchLogsLogGroupArn: r.GetStringProperty("CloudWatchLogsLogGroupArn"),
IsLogging: r.GetBoolProperty("IsLogging"),
BucketName: r.GetStringProperty("S3BucketName"),
}
trails = append(trails, ct)
}
return trails
}

View File

@@ -0,0 +1,14 @@
package cloudwatch
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudwatch"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a CloudWatch instance
func Adapt(cfFile parser.FileContext) cloudwatch.CloudWatch {
return cloudwatch.CloudWatch{
LogGroups: getLogGroups(cfFile),
Alarms: nil,
}
}

View File

@@ -0,0 +1,26 @@
package cloudwatch
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/cloudwatch"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getLogGroups(ctx parser.FileContext) (logGroups []cloudwatch.LogGroup) {
logGroupResources := ctx.GetResourcesByType("AWS::Logs::LogGroup")
for _, r := range logGroupResources {
group := cloudwatch.LogGroup{
Metadata: r.Metadata(),
Arn: types.StringDefault("", r.Metadata()),
Name: r.GetStringProperty("LogGroupName"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
RetentionInDays: r.GetIntProperty("RetentionInDays", 0),
MetricFilters: nil,
}
logGroups = append(logGroups, group)
}
return logGroups
}

View File

@@ -0,0 +1,13 @@
package codebuild
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/codebuild"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a CodeBuild instance
func Adapt(cfFile parser.FileContext) codebuild.CodeBuild {
return codebuild.CodeBuild{
Projects: getProjects(cfFile),
}
}

View File

@@ -0,0 +1,63 @@
package codebuild
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/codebuild"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getProjects(ctx parser2.FileContext) (projects []codebuild.Project) {
projectResources := ctx.GetResourcesByType("AWS::CodeBuild::Project")
for _, r := range projectResources {
project := codebuild.Project{
Metadata: r.Metadata(),
ArtifactSettings: getArtifactSettings(r),
SecondaryArtifactSettings: getSecondaryArtifactSettings(r),
}
projects = append(projects, project)
}
return projects
}
func getSecondaryArtifactSettings(r *parser2.Resource) (secondaryArtifacts []codebuild.ArtifactSettings) {
secondaryArtifactsList := r.GetProperty("SecondaryArtifacts")
if secondaryArtifactsList.IsNil() || !secondaryArtifactsList.IsList() {
return
}
for _, a := range secondaryArtifactsList.AsList() {
settings := codebuild.ArtifactSettings{
Metadata: secondaryArtifactsList.Metadata(),
EncryptionEnabled: types.BoolDefault(true, secondaryArtifactsList.Metadata()),
}
encryptionDisabled := a.GetProperty("EncryptionDisabled")
if encryptionDisabled.IsBool() {
settings.EncryptionEnabled = types.Bool(!encryptionDisabled.AsBool(), encryptionDisabled.Metadata())
}
secondaryArtifacts = append(secondaryArtifacts, settings)
}
return secondaryArtifacts
}
func getArtifactSettings(r *parser2.Resource) codebuild.ArtifactSettings {
settings := codebuild.ArtifactSettings{
Metadata: r.Metadata(),
EncryptionEnabled: types.BoolDefault(true, r.Metadata()),
}
artifactsProperty := r.GetProperty("Artifacts")
if artifactsProperty.IsNotNil() {
encryptionDisabled := artifactsProperty.GetProperty("EncryptionDisabled")
if encryptionDisabled.IsBool() {
settings.EncryptionEnabled = types.Bool(!encryptionDisabled.AsBool(), encryptionDisabled.Metadata())
}
}
return settings
}

View File

@@ -0,0 +1,70 @@
package config
import (
"context"
"testing"
"github.com/aquasecurity/defsec/pkg/providers/aws/config"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/internal/testutil"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
"github.com/stretchr/testify/require"
)
func TestAdapt(t *testing.T) {
tests := []struct {
name string
source string
expected config.Config
}{
{
name: "Config aggregator with AccountAggregationSources",
source: `AWSTemplateFormatVersion: "2010-09-09"
Resources:
ConfigurationAggregator:
Type: AWS::Config::ConfigurationAggregator
Properties:
AccountAggregationSources:
- AllAwsRegions: "true"
`,
expected: config.Config{
ConfigurationAggregrator: config.ConfigurationAggregrator{
Metadata: types.NewTestMetadata(),
SourceAllRegions: types.Bool(true, types.NewTestMetadata()),
},
},
},
{
name: "Config aggregator with OrganizationAggregationSource",
source: `AWSTemplateFormatVersion: "2010-09-09"
Resources:
ConfigurationAggregator:
Type: AWS::Config::ConfigurationAggregator
Properties:
OrganizationAggregationSource:
AllAwsRegions: "true"
`,
expected: config.Config{
ConfigurationAggregrator: config.ConfigurationAggregrator{
Metadata: types.NewTestMetadata(),
SourceAllRegions: types.Bool(true, types.NewTestMetadata()),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fs := testutil.CreateFS(t, map[string]string{
"template.yaml": tt.source,
})
p := parser.New()
fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
require.NoError(t, err)
testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx))
})
}
}

View File

@@ -0,0 +1,41 @@
package config
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/config"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getConfigurationAggregator(ctx parser2.FileContext) config.ConfigurationAggregrator {
aggregator := config.ConfigurationAggregrator{
Metadata: defsecTypes.NewUnmanagedMetadata(),
SourceAllRegions: defsecTypes.BoolDefault(false, ctx.Metadata()),
}
aggregatorResources := ctx.GetResourcesByType("AWS::Config::ConfigurationAggregator")
if len(aggregatorResources) == 0 {
return aggregator
}
return config.ConfigurationAggregrator{
Metadata: aggregatorResources[0].Metadata(),
SourceAllRegions: isSourcingAllRegions(aggregatorResources[0]),
}
}
func isSourcingAllRegions(r *parser2.Resource) defsecTypes.BoolValue {
accountProp := r.GetProperty("AccountAggregationSources")
if accountProp.IsNotNil() && accountProp.IsList() {
for _, a := range accountProp.AsList() {
regionsProp := a.GetProperty("AllAwsRegions")
if regionsProp.IsNotNil() {
return a.GetBoolProperty("AllAwsRegions")
}
}
}
return r.GetBoolProperty("OrganizationAggregationSource.AllAwsRegions")
}
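// Illustrative only: isSourcingAllRegions accepts either of the assumed fragments below, mirroring
// the two cases in the adapt test for this package; the first AccountAggregationSources entry that
// sets AllAwsRegions wins, otherwise the organization-level flag is read.
//
//	AccountAggregationSources:
//	  - AllAwsRegions: "true"
//
//	OrganizationAggregationSource:
//	  AllAwsRegions: "true"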

View File

@@ -0,0 +1,13 @@
package config
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/config"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an AWS Config instance
func Adapt(cfFile parser.FileContext) config.Config {
return config.Config{
ConfigurationAggregrator: getConfigurationAggregator(cfFile),
}
}

View File

@@ -0,0 +1,58 @@
package documentdb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/documentdb"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser2.FileContext) (clusters []documentdb.Cluster) {
clusterResources := ctx.GetResourcesByType("AWS::DocDB::DBCluster")
for _, r := range clusterResources {
cluster := documentdb.Cluster{
Metadata: r.Metadata(),
Identifier: r.GetStringProperty("DBClusterIdentifier"),
EnabledLogExports: getLogExports(r),
Instances: nil,
BackupRetentionPeriod: r.GetIntProperty("BackupRetentionPeriod", 1),
StorageEncrypted: r.GetBoolProperty("StorageEncrypted"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
}
updateInstancesOnCluster(&cluster, ctx)
clusters = append(clusters, cluster)
}
return clusters
}
func updateInstancesOnCluster(cluster *documentdb.Cluster, ctx parser2.FileContext) {
instanceResources := ctx.GetResourcesByType("AWS::DocDB::DBInstance")
for _, r := range instanceResources {
clusterIdentifier := r.GetStringProperty("DBClusterIdentifier")
if clusterIdentifier == cluster.Identifier {
cluster.Instances = append(cluster.Instances, documentdb.Instance{
Metadata: r.Metadata(),
KMSKeyID: cluster.KMSKeyID,
})
}
}
}
func getLogExports(r *parser2.Resource) (logExports []types.StringValue) {
exportsList := r.GetProperty("EnableCloudwatchLogsExports")
if exportsList.IsNil() || exportsList.IsNotList() {
return logExports
}
for _, export := range exportsList.AsList() {
logExports = append(logExports, export.AsStringValue())
}
return logExports
}

View File

@@ -0,0 +1,13 @@
package documentdb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/documentdb"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a DocumentDB instance
func Adapt(cfFile parser.FileContext) documentdb.DocumentDB {
return documentdb.DocumentDB{
Clusters: getClusters(cfFile),
}
}

View File

@@ -0,0 +1,36 @@
package dynamodb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/dynamodb"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(file parser.FileContext) (clusters []dynamodb.DAXCluster) {
clusterResources := file.GetResourcesByType("AWS::DAX::Cluster")
for _, r := range clusterResources {
cluster := dynamodb.DAXCluster{
Metadata: r.Metadata(),
ServerSideEncryption: dynamodb.ServerSideEncryption{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
KMSKeyID: defsecTypes.StringDefault("", r.Metadata()),
},
PointInTimeRecovery: defsecTypes.BoolUnresolvable(r.Metadata()),
}
if sseProp := r.GetProperty("SSESpecification"); sseProp.IsNotNil() {
cluster.ServerSideEncryption = dynamodb.ServerSideEncryption{
Metadata: sseProp.Metadata(),
Enabled: r.GetBoolProperty("SSESpecification.SSEEnabled"),
KMSKeyID: defsecTypes.StringUnresolvable(sseProp.Metadata()),
}
}
clusters = append(clusters, cluster)
}
return clusters
}

View File

@@ -0,0 +1,13 @@
package dynamodb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/dynamodb"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a DynamoDB instance
func Adapt(cfFile parser.FileContext) dynamodb.DynamoDB {
return dynamodb.DynamoDB{
DAXClusters: getClusters(cfFile),
}
}

View File

@@ -0,0 +1,175 @@
package ec2
import (
"context"
"testing"
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/internal/testutil"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
"github.com/stretchr/testify/require"
)
func TestAdapt(t *testing.T) {
tests := []struct {
name string
source string
expected ec2.EC2
}{
{
name: "ec2 instance",
source: `AWSTemplateFormatVersion: 2010-09-09
Resources:
MyEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: "ami-79fd7eee"
KeyName: "testkey"
BlockDeviceMappings:
- DeviceName: "/dev/sdm"
Ebs:
VolumeType: "io1"
Iops: "200"
DeleteOnTermination: "false"
VolumeSize: "20"
Encrypted: true
- DeviceName: "/dev/sdk"
NoDevice: {}
`,
expected: ec2.EC2{
Instances: []ec2.Instance{
{
Metadata: types.NewTestMetadata(),
MetadataOptions: ec2.MetadataOptions{
HttpEndpoint: types.StringDefault("enabled", types.NewTestMetadata()),
HttpTokens: types.StringDefault("optional", types.NewTestMetadata()),
},
RootBlockDevice: &ec2.BlockDevice{
Metadata: types.NewTestMetadata(),
Encrypted: types.BoolDefault(true, types.NewTestMetadata()),
},
EBSBlockDevices: []*ec2.BlockDevice{
{
Metadata: types.NewTestMetadata(),
Encrypted: types.BoolDefault(false, types.NewTestMetadata()),
},
},
},
},
},
},
{
name: "ec2 instance with launch template, ref to name",
source: `AWSTemplateFormatVersion: 2010-09-09
Resources:
MyLaunchTemplate:
Type: AWS::EC2::LaunchTemplate
Properties:
LaunchTemplateName: MyTemplate
LaunchTemplateData:
MetadataOptions:
HttpEndpoint: enabled
HttpTokens: required
MyEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: "ami-79fd7eee"
LaunchTemplate:
LaunchTemplateName: MyTemplate
`,
expected: ec2.EC2{
LaunchTemplates: []ec2.LaunchTemplate{
{
Metadata: types.NewTestMetadata(),
Name: types.String("MyTemplate", types.NewTestMetadata()),
Instance: ec2.Instance{
Metadata: types.NewTestMetadata(),
MetadataOptions: ec2.MetadataOptions{
HttpEndpoint: types.String("enabled", types.NewTestMetadata()),
HttpTokens: types.String("required", types.NewTestMetadata()),
},
},
},
},
Instances: []ec2.Instance{
{
Metadata: types.NewTestMetadata(),
MetadataOptions: ec2.MetadataOptions{
HttpEndpoint: types.String("enabled", types.NewTestMetadata()),
HttpTokens: types.String("required", types.NewTestMetadata()),
},
RootBlockDevice: &ec2.BlockDevice{
Metadata: types.NewTestMetadata(),
Encrypted: types.Bool(false, types.NewTestMetadata()),
},
},
},
},
},
{
name: "ec2 instance with launch template, ref to id",
source: `AWSTemplateFormatVersion: 2010-09-09
Resources:
MyLaunchTemplate:
Type: AWS::EC2::LaunchTemplate
Properties:
LaunchTemplateName: MyTemplate
LaunchTemplateData:
MetadataOptions:
HttpEndpoint: enabled
HttpTokens: required
MyEC2Instance:
Type: AWS::EC2::Instance
Properties:
ImageId: "ami-79fd7eee"
LaunchTemplate:
LaunchTemplateId: !Ref MyLaunchTemplate
`,
expected: ec2.EC2{
LaunchTemplates: []ec2.LaunchTemplate{
{
Metadata: types.NewTestMetadata(),
Name: types.String("MyTemplate", types.NewTestMetadata()),
Instance: ec2.Instance{
Metadata: types.NewTestMetadata(),
MetadataOptions: ec2.MetadataOptions{
HttpEndpoint: types.String("enabled", types.NewTestMetadata()),
HttpTokens: types.String("required", types.NewTestMetadata()),
},
},
},
},
Instances: []ec2.Instance{
{
Metadata: types.NewTestMetadata(),
MetadataOptions: ec2.MetadataOptions{
HttpEndpoint: types.String("enabled", types.NewTestMetadata()),
HttpTokens: types.String("required", types.NewTestMetadata()),
},
RootBlockDevice: &ec2.BlockDevice{
Metadata: types.NewTestMetadata(),
Encrypted: types.Bool(false, types.NewTestMetadata()),
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fsys := testutil.CreateFS(t, map[string]string{
"main.yaml": tt.source,
})
fctx, err := parser.New().ParseFile(context.TODO(), fsys, "main.yaml")
require.NoError(t, err)
adapted := Adapt(*fctx)
testutil.AssertDefsecEqual(t, tt.expected, adapted)
})
}
}

View File

@@ -0,0 +1,20 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an EC2 instance
func Adapt(cfFile parser.FileContext) ec2.EC2 {
return ec2.EC2{
LaunchConfigurations: getLaunchConfigurations(cfFile),
LaunchTemplates: getLaunchTemplates(cfFile),
Instances: getInstances(cfFile),
VPCs: nil,
NetworkACLs: getNetworkACLs(cfFile),
SecurityGroups: getSecurityGroups(cfFile),
Subnets: getSubnets(cfFile),
Volumes: getVolumes(cfFile),
}
}

View File

@@ -0,0 +1,106 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getInstances(ctx parser2.FileContext) (instances []ec2.Instance) {
instanceResources := ctx.GetResourcesByType("AWS::EC2::Instance")
for _, r := range instanceResources {
instance := ec2.Instance{
Metadata: r.Metadata(),
// instance metadata options are not supported by CloudFormation at the moment -
// https://github.com/aws-cloudformation/cloudformation-coverage-roadmap/issues/655
MetadataOptions: ec2.MetadataOptions{
Metadata: r.Metadata(),
HttpTokens: defsecTypes.StringDefault("optional", r.Metadata()),
HttpEndpoint: defsecTypes.StringDefault("enabled", r.Metadata()),
},
UserData: r.GetStringProperty("UserData"),
}
if launchTemplate, ok := findRelatedLaunchTemplate(ctx, r); ok {
instance = launchTemplate.Instance
}
if instance.RootBlockDevice == nil {
instance.RootBlockDevice = &ec2.BlockDevice{
Metadata: r.Metadata(),
Encrypted: defsecTypes.BoolDefault(false, r.Metadata()),
}
}
blockDevices := getBlockDevices(r)
for i, device := range blockDevices {
copyDevice := device
if i == 0 {
instance.RootBlockDevice = copyDevice
continue
}
instance.EBSBlockDevices = append(instance.EBSBlockDevices, device)
}
instances = append(instances, instance)
}
return instances
}
func findRelatedLaunchTemplate(fctx parser2.FileContext, r *parser2.Resource) (ec2.LaunchTemplate, bool) {
launchTemplateRef := r.GetProperty("LaunchTemplate.LaunchTemplateName")
if launchTemplateRef.IsString() {
res := findLaunchTemplateByName(fctx, launchTemplateRef)
if res != nil {
return adaptLaunchTemplate(res), true
}
}
launchTemplateRef = r.GetProperty("LaunchTemplate.LaunchTemplateId")
if !launchTemplateRef.IsString() {
return ec2.LaunchTemplate{}, false
}
resource := fctx.GetResourceByLogicalID(launchTemplateRef.AsString())
if resource == nil {
return ec2.LaunchTemplate{}, false
}
return adaptLaunchTemplate(resource), true
}
func findLaunchTemplateByName(fctx parser2.FileContext, prop *parser2.Property) *parser2.Resource {
for _, res := range fctx.GetResourcesByType("AWS::EC2::LaunchTemplate") {
templateName := res.GetProperty("LaunchTemplateName")
if templateName.IsNotString() {
continue
}
if prop.EqualTo(templateName.AsString()) {
return res
}
}
return nil
}
func getBlockDevices(r *parser2.Resource) []*ec2.BlockDevice {
var blockDevices []*ec2.BlockDevice
devicesProp := r.GetProperty("BlockDeviceMappings")
if devicesProp.IsNil() {
return blockDevices
}
for _, d := range devicesProp.AsList() {
device := &ec2.BlockDevice{
Metadata: d.Metadata(),
Encrypted: d.GetBoolProperty("Ebs.Encrypted"),
}
blockDevices = append(blockDevices, device)
}
return blockDevices
}
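// Illustrative only: when an instance references a launch template (lookup by LaunchTemplateName is
// tried before LaunchTemplateId), the adapted instance starts as a copy of that template's
// instance; any BlockDeviceMappings declared on the instance are then applied on top, with the
// first mapping treated as the root block device and the rest as EBS block devices, as exercised by
// the adapt test earlier in this change.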

View File

@@ -0,0 +1,48 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getLaunchConfigurations(file parser.FileContext) (launchConfigurations []ec2.LaunchConfiguration) {
launchConfigResources := file.GetResourcesByType("AWS::AutoScaling::LaunchConfiguration")
for _, r := range launchConfigResources {
launchConfig := ec2.LaunchConfiguration{
Metadata: r.Metadata(),
Name: r.GetStringProperty("Name"),
AssociatePublicIP: r.GetBoolProperty("AssociatePublicIpAddress"),
MetadataOptions: ec2.MetadataOptions{
Metadata: r.Metadata(),
HttpTokens: types.StringDefault("optional", r.Metadata()),
HttpEndpoint: types.StringDefault("enabled", r.Metadata()),
},
UserData: r.GetStringProperty("UserData", ""),
}
if opts := r.GetProperty("MetadataOptions"); opts.IsNotNil() {
launchConfig.MetadataOptions = ec2.MetadataOptions{
Metadata: opts.Metadata(),
HttpTokens: opts.GetStringProperty("HttpTokens", "optional"),
HttpEndpoint: opts.GetStringProperty("HttpEndpoint", "enabled"),
}
}
blockDevices := getBlockDevices(r)
for i, device := range blockDevices {
copyDevice := device
if i == 0 {
launchConfig.RootBlockDevice = copyDevice
continue
}
launchConfig.EBSBlockDevices = append(launchConfig.EBSBlockDevices, device)
}
launchConfigurations = append(launchConfigurations, launchConfig)
}
return launchConfigurations
}

View File

@@ -0,0 +1,56 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getLaunchTemplates(file parser2.FileContext) (templates []ec2.LaunchTemplate) {
launchConfigResources := file.GetResourcesByType("AWS::EC2::LaunchTemplate")
for _, r := range launchConfigResources {
templates = append(templates, adaptLaunchTemplate(r))
}
return templates
}
func adaptLaunchTemplate(r *parser2.Resource) ec2.LaunchTemplate {
launchTemplate := ec2.LaunchTemplate{
Metadata: r.Metadata(),
Name: r.GetStringProperty("LaunchTemplateName", ""),
Instance: ec2.Instance{
Metadata: r.Metadata(),
MetadataOptions: ec2.MetadataOptions{
Metadata: r.Metadata(),
HttpTokens: types.StringDefault("optional", r.Metadata()),
HttpEndpoint: types.StringDefault("enabled", r.Metadata()),
},
UserData: types.StringDefault("", r.Metadata()),
},
}
if data := r.GetProperty("LaunchTemplateData"); data.IsNotNil() {
if opts := data.GetProperty("MetadataOptions"); opts.IsNotNil() {
launchTemplate.MetadataOptions = ec2.MetadataOptions{
Metadata: opts.Metadata(),
HttpTokens: opts.GetStringProperty("HttpTokens", "optional"),
HttpEndpoint: opts.GetStringProperty("HttpEndpoint", "enabled"),
}
}
launchTemplate.Instance.UserData = data.GetStringProperty("UserData", "")
blockDevices := getBlockDevices(r)
for i, device := range blockDevices {
copyDevice := device
if i == 0 {
launchTemplate.RootBlockDevice = copyDevice
} else {
launchTemplate.EBSBlockDevices = append(launchTemplate.EBSBlockDevices, device)
}
}
}
return launchTemplate
}

View File

@@ -0,0 +1,69 @@
package ec2
import (
"strconv"
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getNetworkACLs(ctx parser.FileContext) (acls []ec2.NetworkACL) {
for _, aclResource := range ctx.GetResourcesByType("AWS::EC2::NetworkAcl") {
acl := ec2.NetworkACL{
Metadata: aclResource.Metadata(),
Rules: getRules(aclResource.ID(), ctx),
IsDefaultRule: defsecTypes.BoolDefault(false, aclResource.Metadata()),
}
acls = append(acls, acl)
}
return acls
}
func getRules(id string, ctx parser.FileContext) (rules []ec2.NetworkACLRule) {
for _, ruleResource := range ctx.GetResourcesByType("AWS::EC2::NetworkAclEntry") {
aclID := ruleResource.GetProperty("NetworkAclId")
if aclID.IsString() && aclID.AsString() == id {
rule := ec2.NetworkACLRule{
Metadata: ruleResource.Metadata(),
Type: defsecTypes.StringDefault(ec2.TypeIngress, ruleResource.Metadata()),
Action: defsecTypes.StringDefault(ec2.ActionAllow, ruleResource.Metadata()),
Protocol: defsecTypes.String("-1", ruleResource.Metadata()),
CIDRs: nil,
}
if egressProperty := ruleResource.GetProperty("Egress"); egressProperty.IsBool() {
if egressProperty.AsBool() {
rule.Type = defsecTypes.String(ec2.TypeEgress, egressProperty.Metadata())
} else {
rule.Type = defsecTypes.String(ec2.TypeIngress, egressProperty.Metadata())
}
}
if actionProperty := ruleResource.GetProperty("RuleAction"); actionProperty.IsString() {
if actionProperty.AsString() == ec2.ActionAllow {
rule.Action = defsecTypes.String(ec2.ActionAllow, actionProperty.Metadata())
} else {
rule.Action = defsecTypes.String(ec2.ActionDeny, actionProperty.Metadata())
}
}
if protocolProperty := ruleResource.GetProperty("Protocol"); protocolProperty.IsInt() {
protocol := protocolProperty.AsIntValue().Value()
rule.Protocol = defsecTypes.String(strconv.Itoa(protocol), protocolProperty.Metadata())
}
if ipv4Cidr := ruleResource.GetProperty("CidrBlock"); ipv4Cidr.IsString() {
rule.CIDRs = append(rule.CIDRs, ipv4Cidr.AsStringValue())
}
if ipv6Cidr := ruleResource.GetProperty("Ipv6CidrBlock"); ipv6Cidr.IsString() {
rule.CIDRs = append(rule.CIDRs, ipv6Cidr.AsStringValue())
}
rules = append(rules, rule)
}
}
return rules
}

View File

@@ -0,0 +1,68 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getSecurityGroups(ctx parser2.FileContext) (groups []ec2.SecurityGroup) {
for _, r := range ctx.GetResourcesByType("AWS::EC2::SecurityGroup") {
group := ec2.SecurityGroup{
Metadata: r.Metadata(),
Description: r.GetStringProperty("GroupDescription"),
IngressRules: getIngressRules(r),
EgressRules: getEgressRules(r),
IsDefault: types.Bool(r.GetStringProperty("GroupName").EqualTo("default"), r.Metadata()),
VPCID: r.GetStringProperty("VpcId"),
}
groups = append(groups, group)
}
return groups
}
func getIngressRules(r *parser2.Resource) (sgRules []ec2.SecurityGroupRule) {
if ingressProp := r.GetProperty("SecurityGroupIngress"); ingressProp.IsList() {
for _, ingress := range ingressProp.AsList() {
rule := ec2.SecurityGroupRule{
Metadata: ingress.Metadata(),
Description: ingress.GetStringProperty("Description"),
CIDRs: nil,
}
v4Cidr := ingress.GetProperty("CidrIp")
if v4Cidr.IsString() && v4Cidr.AsStringValue().IsNotEmpty() {
rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v4Cidr.AsString(), v4Cidr.Metadata()))
}
v6Cidr := ingress.GetProperty("CidrIpv6")
if v6Cidr.IsString() && v6Cidr.AsStringValue().IsNotEmpty() {
rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v6Cidr.AsString(), v6Cidr.Metadata()))
}
sgRules = append(sgRules, rule)
}
}
return sgRules
}
func getEgressRules(r *parser2.Resource) (sgRules []ec2.SecurityGroupRule) {
if egressProp := r.GetProperty("SecurityGroupEgress"); egressProp.IsList() {
for _, egress := range egressProp.AsList() {
rule := ec2.SecurityGroupRule{
Metadata: egress.Metadata(),
Description: egress.GetStringProperty("Description"),
}
v4Cidr := egress.GetProperty("CidrIp")
if v4Cidr.IsString() && v4Cidr.AsStringValue().IsNotEmpty() {
rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v4Cidr.AsString(), v4Cidr.Metadata()))
}
v6Cidr := egress.GetProperty("CidrIpv6")
if v6Cidr.IsString() && v6Cidr.AsStringValue().IsNotEmpty() {
rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v6Cidr.AsString(), v6Cidr.Metadata()))
}
sgRules = append(sgRules, rule)
}
}
return sgRules
}

View File

@@ -0,0 +1,21 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getSubnets(ctx parser.FileContext) (subnets []ec2.Subnet) {
subnetResources := ctx.GetResourcesByType("AWS::EC2::Subnet")
for _, r := range subnetResources {
subnet := ec2.Subnet{
Metadata: r.Metadata(),
MapPublicIpOnLaunch: r.GetBoolProperty("MapPublicIpOnLaunch"),
}
subnets = append(subnets, subnet)
}
return subnets
}

View File

@@ -0,0 +1,25 @@
package ec2
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ec2"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getVolumes(ctx parser.FileContext) (volumes []ec2.Volume) {
volumeResources := ctx.GetResourcesByType("AWS::EC2::Volume")
for _, r := range volumeResources {
volume := ec2.Volume{
Metadata: r.Metadata(),
Encryption: ec2.Encryption{
Metadata: r.Metadata(),
Enabled: r.GetBoolProperty("Encrypted"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
},
}
volumes = append(volumes, volume)
}
return volumes
}

View File

@@ -0,0 +1,13 @@
package ecr
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ecr"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an ECR instance
func Adapt(cfFile parser.FileContext) ecr.ECR {
return ecr.ECR{
Repositories: getRepositories(cfFile),
}
}

View File

@@ -0,0 +1,91 @@
package ecr
import (
"fmt"
"github.com/liamg/iamgo"
"github.com/aquasecurity/defsec/pkg/providers/aws/ecr"
"github.com/aquasecurity/defsec/pkg/providers/aws/iam"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getRepositories(ctx parser2.FileContext) (repositories []ecr.Repository) {
repositoryResources := ctx.GetResourcesByType("AWS::ECR::Repository")
for _, r := range repositoryResources {
repository := ecr.Repository{
Metadata: r.Metadata(),
ImageScanning: ecr.ImageScanning{
Metadata: r.Metadata(),
ScanOnPush: defsecTypes.BoolDefault(false, r.Metadata()),
},
ImageTagsImmutable: hasImmutableImageTags(r),
Policies: nil,
Encryption: ecr.Encryption{
Metadata: r.Metadata(),
Type: defsecTypes.StringDefault(ecr.EncryptionTypeAES256, r.Metadata()),
KMSKeyID: defsecTypes.StringDefault("", r.Metadata()),
},
}
if imageScanningProp := r.GetProperty("ImageScanningConfiguration"); imageScanningProp.IsNotNil() {
repository.ImageScanning = ecr.ImageScanning{
Metadata: imageScanningProp.Metadata(),
ScanOnPush: imageScanningProp.GetBoolProperty("ScanOnPush", false),
}
}
if encProp := r.GetProperty("EncryptionConfiguration"); encProp.IsNotNil() {
repository.Encryption = ecr.Encryption{
Metadata: encProp.Metadata(),
Type: encProp.GetStringProperty("EncryptionType", ecr.EncryptionTypeAES256),
KMSKeyID: encProp.GetStringProperty("KmsKey", ""),
}
}
if policy, err := getPolicy(r); err == nil {
repository.Policies = append(repository.Policies, *policy)
}
repositories = append(repositories, repository)
}
return repositories
}
func getPolicy(r *parser2.Resource) (*iam.Policy, error) {
policyProp := r.GetProperty("RepositoryPolicyText")
if policyProp.IsNil() {
return nil, fmt.Errorf("missing policy")
}
parsed, err := iamgo.Parse(policyProp.GetJsonBytes())
if err != nil {
return nil, err
}
return &iam.Policy{
Metadata: policyProp.Metadata(),
Name: defsecTypes.StringDefault("", policyProp.Metadata()),
Document: iam.Document{
Metadata: policyProp.Metadata(),
Parsed: *parsed,
},
Builtin: defsecTypes.Bool(false, policyProp.Metadata()),
}, nil
}
func hasImmutableImageTags(r *parser2.Resource) defsecTypes.BoolValue {
mutabilityProp := r.GetProperty("ImageTagMutability")
if mutabilityProp.IsNil() {
return defsecTypes.BoolDefault(false, r.Metadata())
}
if !mutabilityProp.EqualTo("IMMUTABLE") {
return defsecTypes.Bool(false, mutabilityProp.Metadata())
}
return defsecTypes.Bool(true, mutabilityProp.Metadata())
}
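// Illustrative only: RepositoryPolicyText is converted to JSON bytes and parsed with iamgo, so a
// policy like the assumed example below surfaces on the adapted repository as a single iam.Policy;
// when the property is absent, getPolicy reports an error and no policy is attached.
//
//	RepositoryPolicyText:
//	  Version: "2012-10-17"
//	  Statement:
//	    - Effect: Allow
//	      Principal: "*"
//	      Action: "ecr:GetDownloadUrlForLayer"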

View File

@@ -0,0 +1,57 @@
package ecs
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ecs"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser2.FileContext) (clusters []ecs.Cluster) {
clusterResources := ctx.GetResourcesByType("AWS::ECS::Cluster")
for _, r := range clusterResources {
cluster := ecs.Cluster{
Metadata: r.Metadata(),
Settings: getClusterSettings(r),
}
clusters = append(clusters, cluster)
}
return clusters
}
func getClusterSettings(r *parser2.Resource) ecs.ClusterSettings {
clusterSettings := ecs.ClusterSettings{
Metadata: r.Metadata(),
ContainerInsightsEnabled: types.BoolDefault(false, r.Metadata()),
}
clusterSettingMap := r.GetProperty("ClusterSettings")
if clusterSettingMap.IsNil() || clusterSettingMap.IsNotList() {
return clusterSettings
}
clusterSettings.Metadata = clusterSettingMap.Metadata()
for _, setting := range clusterSettingMap.AsList() {
checkProperty(setting, &clusterSettings)
}
return clusterSettings
}
func checkProperty(setting *parser2.Property, clusterSettings *ecs.ClusterSettings) {
settingMap := setting.AsMap()
name := settingMap["Name"]
if name.IsNotNil() && name.EqualTo("containerInsights") {
value := settingMap["Value"]
if value.IsNotNil() && value.EqualTo("enabled") {
clusterSettings.ContainerInsightsEnabled = types.Bool(true, value.Metadata())
}
}
}

View File

@@ -0,0 +1,14 @@
package ecs
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ecs"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an ECS instance
func Adapt(cfFile parser.FileContext) ecs.ECS {
return ecs.ECS{
Clusters: getClusters(cfFile),
TaskDefinitions: getTaskDefinitions(cfFile),
}
}

View File

@@ -0,0 +1,86 @@
package ecs
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/ecs"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getTaskDefinitions(ctx parser2.FileContext) (taskDefinitions []ecs.TaskDefinition) {
taskDefResources := ctx.GetResourcesByType("AWS::ECS::TaskDefinition")
for _, r := range taskDefResources {
definitions, _ := getContainerDefinitions(r)
taskDef := ecs.TaskDefinition{
Metadata: r.Metadata(),
Volumes: getVolumes(r),
ContainerDefinitions: definitions,
}
taskDefinitions = append(taskDefinitions, taskDef)
}
return taskDefinitions
}
func getContainerDefinitions(r *parser2.Resource) ([]ecs.ContainerDefinition, error) {
var definitions []ecs.ContainerDefinition
containerDefs := r.GetProperty("ContainerDefinitions")
if containerDefs.IsNil() || containerDefs.IsNotList() {
return definitions, nil
}
for _, containerDef := range containerDefs.AsList() {
var envVars []ecs.EnvVar
envVarsList := containerDef.GetProperty("Environment")
if envVarsList.IsNotNil() && envVarsList.IsList() {
for _, envVar := range envVarsList.AsList() {
envVars = append(envVars, ecs.EnvVar{
Name: envVar.GetStringProperty("Name", "").Value(),
Value: envVar.GetStringProperty("Value", "").Value(),
})
}
}
definition := ecs.ContainerDefinition{
Metadata: containerDef.Metadata(),
Name: containerDef.GetStringProperty("Name", ""),
Image: containerDef.GetStringProperty("Image", ""),
CPU: containerDef.GetIntProperty("CPU", 1),
Memory: containerDef.GetIntProperty("Memory", 128),
Essential: containerDef.GetBoolProperty("Essential", false),
Privileged: containerDef.GetBoolProperty("Privileged", false),
Environment: envVars,
PortMappings: nil,
}
definitions = append(definitions, definition)
}
if containerDefs.IsNotNil() && containerDefs.IsString() {
return ecs.CreateDefinitionsFromString(r.Metadata(), containerDefs.AsString())
}
return definitions, nil
}
func getVolumes(r *parser2.Resource) (volumes []ecs.Volume) {
volumesList := r.GetProperty("Volumes")
if volumesList.IsNil() || volumesList.IsNotList() {
return volumes
}
for _, v := range volumesList.AsList() {
volume := ecs.Volume{
Metadata: r.Metadata(),
EFSVolumeConfiguration: ecs.EFSVolumeConfiguration{
Metadata: r.Metadata(),
TransitEncryptionEnabled: types.BoolDefault(false, r.Metadata()),
},
}
transitProp := v.GetProperty("EFSVolumeConfiguration.TransitEncryption")
if transitProp.IsNotNil() && transitProp.EqualTo("enabled", parser2.IgnoreCase) {
volume.EFSVolumeConfiguration.TransitEncryptionEnabled = types.Bool(true, transitProp.Metadata())
}
volumes = append(volumes, volume)
}
return volumes
}

View File

@@ -0,0 +1,13 @@
package efs
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/efs"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an EFS instance
func Adapt(cfFile parser.FileContext) efs.EFS {
return efs.EFS{
FileSystems: getFileSystems(cfFile),
}
}

View File

@@ -0,0 +1,23 @@
package efs
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/efs"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getFileSystems(ctx parser.FileContext) (filesystems []efs.FileSystem) {
filesystemResources := ctx.GetResourcesByType("AWS::EFS::FileSystem")
for _, r := range filesystemResources {
filesystem := efs.FileSystem{
Metadata: r.Metadata(),
Encrypted: r.GetBoolProperty("Encrypted"),
}
filesystems = append(filesystems, filesystem)
}
return filesystems
}

View File

@@ -0,0 +1,56 @@
package eks
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/eks"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser2.FileContext) (clusters []eks.Cluster) {
clusterResources := ctx.GetResourcesByType("AWS::EKS::Cluster")
for _, r := range clusterResources {
cluster := eks.Cluster{
Metadata: r.Metadata(),
// Logging is not supported by CloudFormation: https://github.com/aws/containers-roadmap/issues/242
Logging: eks.Logging{
Metadata: r.Metadata(),
API: defsecTypes.BoolUnresolvable(r.Metadata()),
Audit: defsecTypes.BoolUnresolvable(r.Metadata()),
Authenticator: defsecTypes.BoolUnresolvable(r.Metadata()),
ControllerManager: defsecTypes.BoolUnresolvable(r.Metadata()),
Scheduler: defsecTypes.BoolUnresolvable(r.Metadata()),
},
Encryption: getEncryptionConfig(r),
// endpoint protection not supported - https://github.com/aws/containers-roadmap/issues/242
PublicAccessEnabled: defsecTypes.BoolUnresolvable(r.Metadata()),
PublicAccessCIDRs: nil,
}
clusters = append(clusters, cluster)
}
return clusters
}
func getEncryptionConfig(r *parser2.Resource) eks.Encryption {
encryption := eks.Encryption{
Metadata: r.Metadata(),
Secrets: defsecTypes.BoolDefault(false, r.Metadata()),
KMSKeyID: defsecTypes.StringDefault("", r.Metadata()),
}
if encProp := r.GetProperty("EncryptionConfig"); encProp.IsNotNil() {
encryption.Metadata = encProp.Metadata()
encryption.KMSKeyID = encProp.GetStringProperty("Provider.KeyArn")
resourcesProp := encProp.GetProperty("Resources")
if resourcesProp.IsList() {
if resourcesProp.Contains("secrets") {
encryption.Secrets = defsecTypes.Bool(true, resourcesProp.Metadata())
}
}
}
return encryption
}

View File

@@ -0,0 +1,13 @@
package eks
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/eks"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an EKS instance
func Adapt(cfFile parser.FileContext) eks.EKS {
return eks.EKS{
Clusters: getClusters(cfFile),
}
}

View File

@@ -0,0 +1,24 @@
package elasticache
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticache"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusterGroups(ctx parser.FileContext) (clusters []elasticache.Cluster) {
clusterResources := ctx.GetResourcesByType("AWS::ElastiCache::CacheCluster")
for _, r := range clusterResources {
cluster := elasticache.Cluster{
Metadata: r.Metadata(),
Engine: r.GetStringProperty("Engine"),
NodeType: r.GetStringProperty("CacheNodeType"),
SnapshotRetentionLimit: r.GetIntProperty("SnapshotRetentionLimit"),
}
clusters = append(clusters, cluster)
}
return clusters
}

View File

@@ -0,0 +1,15 @@
package elasticache
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticache"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an ElastiCache instance
func Adapt(cfFile parser.FileContext) elasticache.ElastiCache {
return elasticache.ElastiCache{
Clusters: getClusterGroups(cfFile),
ReplicationGroups: getReplicationGroups(cfFile),
SecurityGroups: getSecurityGroups(cfFile),
}
}

View File

@@ -0,0 +1,23 @@
package elasticache
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticache"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getReplicationGroups(ctx parser.FileContext) (replicationGroups []elasticache.ReplicationGroup) {
replicationGroupResources := ctx.GetResourcesByType("AWS::ElastiCache::ReplicationGroup")
for _, r := range replicationGroupResources {
replicationGroup := elasticache.ReplicationGroup{
Metadata: r.Metadata(),
TransitEncryptionEnabled: r.GetBoolProperty("TransitEncryptionEnabled"),
AtRestEncryptionEnabled: r.GetBoolProperty("AtRestEncryptionEnabled"),
}
replicationGroups = append(replicationGroups, replicationGroup)
}
return replicationGroups
}

View File

@@ -0,0 +1,22 @@
package elasticache
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticache"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getSecurityGroups(ctx parser.FileContext) (securityGroups []elasticache.SecurityGroup) {
sgResources := ctx.GetResourcesByType("AWS::ElastiCache::SecurityGroup")
for _, r := range sgResources {
sg := elasticache.SecurityGroup{
Metadata: r.Metadata(),
Description: r.GetStringProperty("Description"),
}
securityGroups = append(securityGroups, sg)
}
return securityGroups
}

View File

@@ -0,0 +1,84 @@
package elasticsearch
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticsearch"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getDomains(ctx parser.FileContext) (domains []elasticsearch.Domain) {
domainResources := ctx.GetResourcesByType("AWS::Elasticsearch::Domain", "AWS::OpenSearchService::Domain")
for _, r := range domainResources {
domain := elasticsearch.Domain{
Metadata: r.Metadata(),
DomainName: r.GetStringProperty("DomainName"),
AccessPolicies: r.GetStringProperty("AccessPolicies"),
DedicatedMasterEnabled: r.GetBoolProperty("ElasticsearchClusterConfig.DedicatedMasterEnabled"),
VpcId: defsecTypes.String("", r.Metadata()),
LogPublishing: elasticsearch.LogPublishing{
Metadata: r.Metadata(),
AuditEnabled: defsecTypes.BoolDefault(false, r.Metadata()),
CloudWatchLogGroupArn: defsecTypes.String("", r.Metadata()),
},
TransitEncryption: elasticsearch.TransitEncryption{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
},
AtRestEncryption: elasticsearch.AtRestEncryption{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
KmsKeyId: defsecTypes.String("", r.Metadata()),
},
Endpoint: elasticsearch.Endpoint{
Metadata: r.Metadata(),
EnforceHTTPS: defsecTypes.BoolDefault(false, r.Metadata()),
TLSPolicy: defsecTypes.StringDefault("Policy-Min-TLS-1-0-2019-07", r.Metadata()),
},
ServiceSoftwareOptions: elasticsearch.ServiceSoftwareOptions{
Metadata: r.Metadata(),
CurrentVersion: defsecTypes.String("", r.Metadata()),
NewVersion: defsecTypes.String("", r.Metadata()),
UpdateStatus: defsecTypes.String("", r.Metadata()),
UpdateAvailable: defsecTypes.Bool(false, r.Metadata()),
},
}
if prop := r.GetProperty("LogPublishingOptions"); prop.IsNotNil() {
domain.LogPublishing = elasticsearch.LogPublishing{
Metadata: prop.Metadata(),
AuditEnabled: prop.GetBoolProperty("AUDIT_LOGS.Enabled", false),
CloudWatchLogGroupArn: prop.GetStringProperty("CloudWatchLogsLogGroupArn"),
}
}
if prop := r.GetProperty("NodeToNodeEncryptionOptions"); prop.IsNotNil() {
domain.TransitEncryption = elasticsearch.TransitEncryption{
Metadata: prop.Metadata(),
Enabled: prop.GetBoolProperty("Enabled", false),
}
}
if prop := r.GetProperty("EncryptionAtRestOptions"); prop.IsNotNil() {
domain.AtRestEncryption = elasticsearch.AtRestEncryption{
Metadata: prop.Metadata(),
Enabled: prop.GetBoolProperty("Enabled", false),
KmsKeyId: prop.GetStringProperty("KmsKeyId"),
}
}
if prop := r.GetProperty("DomainEndpointOptions"); prop.IsNotNil() {
domain.Endpoint = elasticsearch.Endpoint{
Metadata: prop.Metadata(),
EnforceHTTPS: prop.GetBoolProperty("EnforceHTTPS", false),
TLSPolicy: prop.GetStringProperty("TLSSecurityPolicy", "Policy-Min-TLS-1-0-2019-07"),
}
}
domains = append(domains, domain)
}
return domains
}
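// Illustrative only: both AWS::Elasticsearch::Domain and AWS::OpenSearchService::Domain resources
// are adapted identically, starting from the least-secure defaults (encryption at rest and in
// transit disabled, HTTPS not enforced, TLS policy Policy-Min-TLS-1-0-2019-07) and tightening only
// when the corresponding *Options blocks are present in the template.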

View File

@@ -0,0 +1,13 @@
package elasticsearch
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elasticsearch"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an Elasticsearch instance
func Adapt(cfFile parser.FileContext) elasticsearch.Elasticsearch {
return elasticsearch.Elasticsearch{
Domains: getDomains(cfFile),
}
}

View File

@@ -0,0 +1,72 @@
package elb
import (
"context"
"testing"
"github.com/aquasecurity/defsec/pkg/providers/aws/elb"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/internal/testutil"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
"github.com/stretchr/testify/require"
)
func TestAdapt(t *testing.T) {
tests := []struct {
name string
source string
expected elb.ELB
}{
{
name: "LoadBalancer",
source: `AWSTemplateFormatVersion: "2010-09-09"
Resources:
LoadBalancer:
Type: AWS::ElasticLoadBalancingV2::LoadBalancer
DependsOn:
- ALBLogsBucketPermission
Properties:
Name: "k8s-dev"
IpAddressType: ipv4
LoadBalancerAttributes:
- Key: routing.http2.enabled
Value: "true"
- Key: deletion_protection.enabled
Value: "true"
- Key: routing.http.drop_invalid_header_fields.enabled
Value: "true"
- Key: access_logs.s3.enabled
Value: "true"
Tags:
- Key: ingress.k8s.aws/resource
Value: LoadBalancer
- Key: elbv2.k8s.aws/cluster
Value: "biomage-dev"
Type: application
`,
expected: elb.ELB{
LoadBalancers: []elb.LoadBalancer{
{
Metadata: types.NewTestMetadata(),
Type: types.String("application", types.NewTestMetadata()),
DropInvalidHeaderFields: types.Bool(true, types.NewTestMetadata()),
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fs := testutil.CreateFS(t, map[string]string{
"template.yaml": tt.source,
})
p := parser.New()
fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
require.NoError(t, err)
testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx))
})
}
}

View File

@@ -0,0 +1,13 @@
package elb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elb"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an ELB instance
func Adapt(cfFile parser.FileContext) elb.ELB {
return elb.ELB{
LoadBalancers: getLoadBalancers(cfFile),
}
}

View File

@@ -0,0 +1,81 @@
package elb
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/elb"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getLoadBalancers(ctx parser2.FileContext) (loadbalancers []elb.LoadBalancer) {
loadBalancerResources := ctx.GetResourcesByType("AWS::ElasticLoadBalancingV2::LoadBalancer")
for _, r := range loadBalancerResources {
lb := elb.LoadBalancer{
Metadata: r.Metadata(),
Type: r.GetStringProperty("Type", "application"),
DropInvalidHeaderFields: checkForDropInvalidHeaders(r),
Internal: isInternal(r),
Listeners: getListeners(r, ctx),
}
loadbalancers = append(loadbalancers, lb)
}
return loadbalancers
}
func getListeners(lbr *parser2.Resource, ctx parser2.FileContext) (listeners []elb.Listener) {
listenerResources := ctx.GetResourcesByType("AWS::ElasticLoadBalancingV2::Listener")
for _, r := range listenerResources {
if r.GetStringProperty("LoadBalancerArn").Value() == lbr.ID() {
listener := elb.Listener{
Metadata: r.Metadata(),
Protocol: r.GetStringProperty("Protocol", "HTTP"),
TLSPolicy: r.GetStringProperty("SslPolicy", ""),
DefaultActions: getDefaultListenerActions(r),
}
listeners = append(listeners, listener)
}
}
return listeners
}
func getDefaultListenerActions(r *parser2.Resource) (actions []elb.Action) {
defaultActionsProp := r.GetProperty("DefaultActions")
if defaultActionsProp.IsNotList() {
return actions
}
for _, action := range defaultActionsProp.AsList() {
actions = append(actions, elb.Action{
Metadata: action.Metadata(),
Type: action.GetProperty("Type").AsStringValue(),
})
}
return actions
}
func isInternal(r *parser2.Resource) types.BoolValue {
schemeProp := r.GetProperty("Scheme")
if schemeProp.IsNotString() {
return r.BoolDefault(false)
}
return types.Bool(schemeProp.EqualTo("internal", parser2.IgnoreCase), schemeProp.Metadata())
}
func checkForDropInvalidHeaders(r *parser2.Resource) types.BoolValue {
attributesProp := r.GetProperty("LoadBalancerAttributes")
if attributesProp.IsNotList() {
return types.BoolDefault(false, r.Metadata())
}
for _, attr := range attributesProp.AsList() {
if attr.GetStringProperty("Key").Value() == "routing.http.drop_invalid_header_fields.enabled" {
return attr.GetBoolProperty("Value")
}
}
return r.BoolDefault(false)
}
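// Illustrative only: getListeners links a listener to its load balancer by comparing the listener's
// LoadBalancerArn string with the load balancer's logical resource ID, which lines up with
// templates that use !Ref on the load balancer (assumed fragment below); listeners that carry a
// literal ARN are unlikely to be matched by this comparison.
//
//	MyListener:
//	  Type: AWS::ElasticLoadBalancingV2::Listener
//	  Properties:
//	    LoadBalancerArn: !Ref LoadBalancer
//	    Protocol: HTTPS
//	    SslPolicy: ELBSecurityPolicy-TLS-1-2-2017-01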

View File

@@ -0,0 +1,27 @@
package iam
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/iam"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts an IAM instance
func Adapt(cfFile parser.FileContext) iam.IAM {
return iam.IAM{
PasswordPolicy: iam.PasswordPolicy{
Metadata: defsecTypes.NewUnmanagedMetadata(),
ReusePreventionCount: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMetadata()),
RequireLowercase: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMetadata()),
RequireUppercase: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMetadata()),
RequireNumbers: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMetadata()),
RequireSymbols: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMetadata()),
MaxAgeDays: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMetadata()),
MinimumLength: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMetadata()),
},
Policies: getPolicies(cfFile),
Groups: getGroups(cfFile),
Users: getUsers(cfFile),
Roles: getRoles(cfFile),
}
}

View File

@@ -0,0 +1,126 @@
package iam
import (
"github.com/liamg/iamgo"
"github.com/aquasecurity/defsec/pkg/providers/aws/iam"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getPolicies(ctx parser2.FileContext) (policies []iam.Policy) {
for _, policyResource := range ctx.GetResourcesByType("AWS::IAM::Policy") {
policy := iam.Policy{
Metadata: policyResource.Metadata(),
Name: policyResource.GetStringProperty("PolicyName"),
Document: iam.Document{
Metadata: policyResource.Metadata(),
Parsed: iamgo.Document{},
},
Builtin: defsecTypes.Bool(false, policyResource.Metadata()),
}
if policyProp := policyResource.GetProperty("PolicyDocument"); policyProp.IsNotNil() {
doc, err := iamgo.Parse(policyProp.GetJsonBytes())
if err != nil {
continue
}
policy.Document.Parsed = *doc
}
policies = append(policies, policy)
}
return policies
}
func getRoles(ctx parser2.FileContext) (roles []iam.Role) {
for _, roleResource := range ctx.GetResourcesByType("AWS::IAM::Role") {
policyProp := roleResource.GetProperty("Policies")
roleName := roleResource.GetStringProperty("RoleName")
roles = append(roles, iam.Role{
Metadata: roleResource.Metadata(),
Name: roleName,
Policies: getPoliciesDocs(policyProp),
})
}
return roles
}
func getUsers(ctx parser2.FileContext) (users []iam.User) {
for _, userResource := range ctx.GetResourcesByType("AWS::IAM::User") {
policyProp := userResource.GetProperty("Policies")
userName := userResource.GetStringProperty("GroupName")
users = append(users, iam.User{
Metadata: userResource.Metadata(),
Name: userName,
LastAccess: defsecTypes.TimeUnresolvable(userResource.Metadata()),
Policies: getPoliciesDocs(policyProp),
AccessKeys: getAccessKeys(ctx, userName.Value()),
})
}
return users
}
func getAccessKeys(ctx parser2.FileContext, username string) (accessKeys []iam.AccessKey) {
for _, keyResource := range ctx.GetResourcesByType("AWS::IAM::AccessKey") {
keyUsername := keyResource.GetStringProperty("UserName")
if !keyUsername.EqualTo(username) {
continue
}
active := defsecTypes.BoolDefault(false, keyResource.Metadata())
if statusProp := keyResource.GetProperty("Status"); statusProp.IsString() {
active = defsecTypes.Bool(statusProp.AsString() == "Active", statusProp.Metadata())
}
accessKeys = append(accessKeys, iam.AccessKey{
Metadata: keyResource.Metadata(),
AccessKeyId: defsecTypes.StringUnresolvable(keyResource.Metadata()),
CreationDate: defsecTypes.TimeUnresolvable(keyResource.Metadata()),
LastAccess: defsecTypes.TimeUnresolvable(keyResource.Metadata()),
Active: active,
})
}
return accessKeys
}
func getGroups(ctx parser2.FileContext) (groups []iam.Group) {
for _, groupResource := range ctx.GetResourcesByType("AWS::IAM::Group") {
policyProp := groupResource.GetProperty("Policies")
groupName := groupResource.GetStringProperty("GroupName")
groups = append(groups, iam.Group{
Metadata: groupResource.Metadata(),
Name: groupName,
Policies: getPoliciesDocs(policyProp),
})
}
return groups
}
func getPoliciesDocs(policiesProp *parser2.Property) []iam.Policy {
var policies []iam.Policy
for _, policy := range policiesProp.AsList() {
policyProp := policy.GetProperty("PolicyDocument")
policyName := policy.GetStringProperty("PolicyName")
doc, err := iamgo.Parse(policyProp.GetJsonBytes())
if err != nil {
continue
}
policies = append(policies, iam.Policy{
Metadata: policyProp.Metadata(),
Name: policyName,
Document: iam.Document{
Metadata: policyProp.Metadata(),
Parsed: *doc,
},
Builtin: defsecTypes.Bool(false, policyProp.Metadata()),
})
}
return policies
}
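// Illustrative only: access keys are attached to a user by an exact UserName match, and a key is
// considered active only when its Status property is the string "Active"; the key ID, creation
// time and last-access time cannot be derived from a template, so they are marked unresolvable.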

View File

@@ -0,0 +1,13 @@
package kinesis
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/kinesis"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts a Kinesis instance
func Adapt(cfFile parser.FileContext) kinesis.Kinesis {
return kinesis.Kinesis{
Streams: getStreams(cfFile),
}
}

View File

@@ -0,0 +1,36 @@
package kinesis
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/kinesis"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getStreams(ctx parser.FileContext) (streams []kinesis.Stream) {
streamResources := ctx.GetResourcesByType("AWS::Kinesis::Stream")
for _, r := range streamResources {
stream := kinesis.Stream{
Metadata: r.Metadata(),
Encryption: kinesis.Encryption{
Metadata: r.Metadata(),
Type: types.StringDefault("KMS", r.Metadata()),
KMSKeyID: types.StringDefault("", r.Metadata()),
},
}
if prop := r.GetProperty("StreamEncryption"); prop.IsNotNil() {
stream.Encryption = kinesis.Encryption{
Metadata: prop.Metadata(),
Type: prop.GetStringProperty("EncryptionType", "KMS"),
KMSKeyID: prop.GetStringProperty("KeyId"),
}
}
streams = append(streams, stream)
}
return streams
}

View File

@@ -0,0 +1,53 @@
package lambda
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/lambda"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getFunctions(ctx parser2.FileContext) (functions []lambda.Function) {
functionResources := ctx.GetResourcesByType("AWS::Lambda::Function")
for _, r := range functionResources {
function := lambda.Function{
Metadata: r.Metadata(),
Tracing: lambda.Tracing{
Metadata: r.Metadata(),
Mode: types.StringDefault("PassThrough", r.Metadata()),
},
Permissions: getPermissions(r, ctx),
}
if prop := r.GetProperty("TracingConfig"); prop.IsNotNil() {
function.Tracing = lambda.Tracing{
Metadata: prop.Metadata(),
Mode: prop.GetStringProperty("Mode", "PassThrough"),
}
}
functions = append(functions, function)
}
return functions
}
func getPermissions(funcR *parser2.Resource, ctx parser2.FileContext) (perms []lambda.Permission) {
permissionResources := ctx.GetResourcesByType("AWS::Lambda::Permission")
for _, r := range permissionResources {
if prop := r.GetStringProperty("FunctionName"); prop.EqualTo(funcR.ID()) {
perm := lambda.Permission{
Metadata: r.Metadata(),
Principal: r.GetStringProperty("Principal"),
SourceARN: r.GetStringProperty("SourceArn"),
}
perms = append(perms, perm)
}
}
return perms
}

View File

@@ -0,0 +1,13 @@
package lambda
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/lambda"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts Lambda function and permission resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) lambda.Lambda {
return lambda.Lambda{
Functions: getFunctions(cfFile),
}
}
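
Another illustrative sketch, not included in the commit: getPermissions above attaches a permission only when its FunctionName equals the function resource's logical ID, which the hypothetical template below relies on.

package lambda

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestAdaptTracingAndPermissionsSketch(t *testing.T) {
	// Hypothetical template: FunctionName matches the function's logical ID ("MyFunction").
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  MyFunction:
    Type: AWS::Lambda::Function
    Properties:
      Runtime: go1.x
      TracingConfig:
        Mode: Active
  MyPermission:
    Type: AWS::Lambda::Permission
    Properties:
      FunctionName: MyFunction
      Principal: sns.amazonaws.com
      SourceArn: "arn:aws:sns:us-east-1:123456789012:my-topic"
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Functions, 1)
	require.Equal(t, "Active", adapted.Functions[0].Tracing.Mode.Value())
	require.Len(t, adapted.Functions[0].Permissions, 1)
}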

View File

@@ -0,0 +1,33 @@
package mq
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/mq"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getBrokers(ctx parser.FileContext) (brokers []mq.Broker) {
for _, r := range ctx.GetResourcesByType("AWS::AmazonMQ::Broker") {
broker := mq.Broker{
Metadata: r.Metadata(),
PublicAccess: r.GetBoolProperty("PubliclyAccessible"),
Logging: mq.Logging{
Metadata: r.Metadata(),
General: types.BoolDefault(false, r.Metadata()),
Audit: types.BoolDefault(false, r.Metadata()),
},
}
if prop := r.GetProperty("Logs"); prop.IsNotNil() {
broker.Logging = mq.Logging{
Metadata: prop.Metadata(),
General: prop.GetBoolProperty("General"),
Audit: prop.GetBoolProperty("Audit"),
}
}
brokers = append(brokers, broker)
}
return brokers
}

View File

@@ -0,0 +1,13 @@
package mq
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/mq"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts Amazon MQ broker resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) mq.MQ {
return mq.MQ{
Brokers: getBrokers(cfFile),
}
}
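
Illustrative sketch, not part of the diff: a Logs block overrides the false logging defaults set in getBrokers. IsTrue/IsFalse are assumed to be the defsec BoolValue accessors, mirroring the IsFalse call used in the S3 adapter later in this diff.

package mq

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestBrokerLoggingSketch(t *testing.T) {
	// Hypothetical template: general logging on, audit logging off, broker not public.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Broker:
    Type: AWS::AmazonMQ::Broker
    Properties:
      PubliclyAccessible: false
      Logs:
        General: true
        Audit: false
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Brokers, 1)
	require.True(t, adapted.Brokers[0].Logging.General.IsTrue())
	require.True(t, adapted.Brokers[0].Logging.Audit.IsFalse())
	require.True(t, adapted.Brokers[0].PublicAccess.IsFalse())
}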

View File

@@ -0,0 +1,80 @@
package msk
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/msk"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser.FileContext) (clusters []msk.Cluster) {
for _, r := range ctx.GetResourcesByType("AWS::MSK::Cluster") {
cluster := msk.Cluster{
Metadata: r.Metadata(),
EncryptionInTransit: msk.EncryptionInTransit{
Metadata: r.Metadata(),
ClientBroker: defsecTypes.StringDefault("TLS", r.Metadata()),
},
EncryptionAtRest: msk.EncryptionAtRest{
Metadata: r.Metadata(),
KMSKeyARN: defsecTypes.StringDefault("", r.Metadata()),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
},
Logging: msk.Logging{
Metadata: r.Metadata(),
Broker: msk.BrokerLogging{
Metadata: r.Metadata(),
S3: msk.S3Logging{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
},
Cloudwatch: msk.CloudwatchLogging{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
},
Firehose: msk.FirehoseLogging{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
},
},
},
}
if encProp := r.GetProperty("EncryptionInfo.EncryptionInTransit"); encProp.IsNotNil() {
cluster.EncryptionInTransit = msk.EncryptionInTransit{
Metadata: encProp.Metadata(),
ClientBroker: encProp.GetStringProperty("ClientBroker", "TLS"),
}
}
if encAtRestProp := r.GetProperty("EncryptionInfo.EncryptionAtRest"); encAtRestProp.IsNotNil() {
cluster.EncryptionAtRest = msk.EncryptionAtRest{
Metadata: encAtRestProp.Metadata(),
KMSKeyARN: encAtRestProp.GetStringProperty("DataVolumeKMSKeyId", ""),
Enabled: defsecTypes.BoolDefault(true, encAtRestProp.Metadata()),
}
}
if loggingProp := r.GetProperty("LoggingInfo"); loggingProp.IsNotNil() {
cluster.Logging.Metadata = loggingProp.Metadata()
if brokerLoggingProp := loggingProp.GetProperty("BrokerLogs"); brokerLoggingProp.IsNotNil() {
cluster.Logging.Broker.Metadata = brokerLoggingProp.Metadata()
if s3Prop := brokerLoggingProp.GetProperty("S3"); s3Prop.IsNotNil() {
cluster.Logging.Broker.S3.Metadata = s3Prop.Metadata()
cluster.Logging.Broker.S3.Enabled = s3Prop.GetBoolProperty("Enabled", false)
}
if cwProp := brokerLoggingProp.GetProperty("CloudWatchLogs"); cwProp.IsNotNil() {
cluster.Logging.Broker.Cloudwatch.Metadata = cwProp.Metadata()
cluster.Logging.Broker.Cloudwatch.Enabled = cwProp.GetBoolProperty("Enabled", false)
}
if fhProp := brokerLoggingProp.GetProperty("Firehose"); fhProp.IsNotNil() {
cluster.Logging.Broker.Firehose.Metadata = fhProp.Metadata()
cluster.Logging.Broker.Firehose.Enabled = fhProp.GetBoolProperty("Enabled", false)
}
}
}
clusters = append(clusters, cluster)
}
return clusters
}

View File

@@ -0,0 +1,13 @@
package msk
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/msk"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts MSK cluster resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) msk.MSK {
return msk.MSK{
Clusters: getClusters(cfFile),
}
}
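
Illustrative sketch, not part of the diff: per getClusters above, the mere presence of an EncryptionInfo.EncryptionAtRest block marks encryption at rest as enabled even without an explicit flag. The template and expected values are assumptions.

package msk

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestClusterEncryptionSketch(t *testing.T) {
	// Hypothetical template: encryption at rest via a KMS alias, plaintext allowed in transit.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Cluster:
    Type: AWS::MSK::Cluster
    Properties:
      ClusterName: my-cluster
      EncryptionInfo:
        EncryptionAtRest:
          DataVolumeKMSKeyId: alias/my-key
        EncryptionInTransit:
          ClientBroker: TLS_PLAINTEXT
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Clusters, 1)
	cluster := adapted.Clusters[0]
	require.True(t, cluster.EncryptionAtRest.Enabled.IsTrue())
	require.Equal(t, "alias/my-key", cluster.EncryptionAtRest.KMSKeyARN.Value())
	require.Equal(t, "TLS_PLAINTEXT", cluster.EncryptionInTransit.ClientBroker.Value())
}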

View File

@@ -0,0 +1,34 @@
package neptune
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/neptune"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser2.FileContext) (clusters []neptune.Cluster) {
for _, r := range ctx.GetResourcesByType("AWS::Neptune::DBCluster") {
cluster := neptune.Cluster{
Metadata: r.Metadata(),
Logging: neptune.Logging{
Metadata: r.Metadata(),
Audit: getAuditLog(r),
},
StorageEncrypted: r.GetBoolProperty("StorageEncrypted"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
}
clusters = append(clusters, cluster)
}
return clusters
}
func getAuditLog(r *parser2.Resource) types.BoolValue {
if logsProp := r.GetProperty("EnableCloudwatchLogsExports"); logsProp.IsList() {
if logsProp.Contains("audit") {
return types.Bool(true, logsProp.Metadata())
}
}
return types.BoolDefault(false, r.Metadata())
}

View File

@@ -0,0 +1,13 @@
package neptune
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/neptune"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts Neptune cluster resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) neptune.Neptune {
return neptune.Neptune{
Clusters: getClusters(cfFile),
}
}
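
Illustrative sketch, not part of the diff: getAuditLog above reports audit logging only when "audit" appears in EnableCloudwatchLogsExports. The template and expected values are assumptions.

package neptune

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestClusterAuditLoggingSketch(t *testing.T) {
	// Hypothetical template: audit log export enabled, storage encrypted.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Cluster:
    Type: AWS::Neptune::DBCluster
    Properties:
      StorageEncrypted: true
      KmsKeyId: alias/my-key
      EnableCloudwatchLogsExports:
        - audit
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Clusters, 1)
	require.True(t, adapted.Clusters[0].Logging.Audit.IsTrue())
	require.True(t, adapted.Clusters[0].StorageEncrypted.IsTrue())
}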

View File

@@ -0,0 +1,157 @@
package rds
import (
"context"
"testing"
"github.com/aquasecurity/defsec/pkg/providers/aws/rds"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/internal/testutil"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
"github.com/stretchr/testify/require"
)
func TestAdapt(t *testing.T) {
tests := []struct {
name string
source string
expected rds.RDS
}{
{
name: "cluster with instances",
source: `AWSTemplateFormatVersion: 2010-09-09
Resources:
RDSCluster:
Type: 'AWS::RDS::DBCluster'
Properties:
DBClusterIdentifier: my-cluster1
Engine: aurora-postgresql
StorageEncrypted: true
KmsKeyId: "your-kms-key-id"
PerformanceInsightsEnabled: true
PerformanceInsightsKmsKeyId: "test-kms-key-id"
PublicAccess: true
DeletionProtection: true
BackupRetentionPeriod: 2
RDSDBInstance1:
Type: 'AWS::RDS::DBInstance'
Properties:
Engine: aurora-mysql
EngineVersion: "5.7.12"
DBInstanceIdentifier: test
DBClusterIdentifier:
Ref: RDSCluster
PubliclyAccessible: 'false'
DBInstanceClass: db.r3.xlarge
StorageEncrypted: true
KmsKeyId: "your-kms-key-id"
EnablePerformanceInsights: true
PerformanceInsightsKMSKeyId: "test-kms-key-id2"
MultiAZ: true
AutoMinorVersionUpgrade: true
DBInstanceArn: "arn:aws:rds:us-east-2:123456789012:db:my-mysql-instance-1"
EnableIAMDatabaseAuthentication: true
EnableCloudwatchLogsExports:
- "error"
- "general"
DBParameterGroupName: "testgroup"
Tags:
- Key: "keyname1"
Value: "value1"
- Key: "keyname2"
Value: "value2"
RDSDBParameterGroup:
Type: 'AWS::RDS::DBParameterGroup'
Properties:
Description: "CloudFormation Sample MySQL Parameter Group"
DBParameterGroupName: "testgroup"
`,
expected: rds.RDS{
ParameterGroups: []rds.ParameterGroups{
{
Metadata: types.NewTestMetadata(),
DBParameterGroupName: types.String("testgroup", types.NewTestMetadata()),
},
},
Clusters: []rds.Cluster{
{
Metadata: types.NewTestMetadata(),
BackupRetentionPeriodDays: types.Int(2, types.NewTestMetadata()),
Engine: types.String("aurora-postgresql", types.NewTestMetadata()),
Encryption: rds.Encryption{
EncryptStorage: types.Bool(true, types.NewTestMetadata()),
KMSKeyID: types.String("your-kms-key-id", types.NewTestMetadata()),
},
PerformanceInsights: rds.PerformanceInsights{
Metadata: types.NewTestMetadata(),
Enabled: types.Bool(true, types.NewTestMetadata()),
KMSKeyID: types.String("test-kms-key-id", types.NewTestMetadata()),
},
PublicAccess: types.Bool(false, types.NewTestMetadata()),
DeletionProtection: types.Bool(true, types.NewTestMetadata()),
Instances: []rds.ClusterInstance{
{
Instance: rds.Instance{
Metadata: types.NewTestMetadata(),
StorageEncrypted: types.Bool(true, types.NewTestMetadata()),
Encryption: rds.Encryption{
EncryptStorage: types.Bool(true, types.NewTestMetadata()),
KMSKeyID: types.String("your-kms-key-id", types.NewTestMetadata()),
},
DBInstanceIdentifier: types.String("test", types.NewTestMetadata()),
PubliclyAccessible: types.Bool(false, types.NewTestMetadata()),
PublicAccess: types.BoolDefault(false, types.NewTestMetadata()),
BackupRetentionPeriodDays: types.IntDefault(1, types.NewTestMetadata()),
Engine: types.StringDefault("aurora-mysql", types.NewTestMetadata()),
EngineVersion: types.String("5.7.12", types.NewTestMetadata()),
MultiAZ: types.Bool(true, types.NewTestMetadata()),
AutoMinorVersionUpgrade: types.Bool(true, types.NewTestMetadata()),
DBInstanceArn: types.String("arn:aws:rds:us-east-2:123456789012:db:my-mysql-instance-1", types.NewTestMetadata()),
IAMAuthEnabled: types.Bool(true, types.NewTestMetadata()),
PerformanceInsights: rds.PerformanceInsights{
Metadata: types.NewTestMetadata(),
Enabled: types.Bool(true, types.NewTestMetadata()),
KMSKeyID: types.String("test-kms-key-id2", types.NewTestMetadata()),
},
EnabledCloudwatchLogsExports: []types.StringValue{
types.String("error", types.NewTestMetadata()),
types.String("general", types.NewTestMetadata()),
},
DBParameterGroups: []rds.DBParameterGroupsList{
{
DBParameterGroupName: types.String("testgroup", types.NewTestMetadata()),
},
},
TagList: []rds.TagList{
{
Metadata: types.NewTestMetadata(),
},
{
Metadata: types.NewTestMetadata(),
},
},
},
ClusterIdentifier: types.String("RDSCluster", types.NewTestMetadata()),
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fs := testutil.CreateFS(t, map[string]string{
"template.yaml": tt.source,
})
p := parser.New()
fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
require.NoError(t, err)
testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx))
})
}
}

View File

@@ -0,0 +1,48 @@
package rds
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/rds"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser.FileContext) (clusters map[string]rds.Cluster) {
clusters = make(map[string]rds.Cluster)
for _, clusterResource := range ctx.GetResourcesByType("AWS::RDS::DBCluster") {
clusters[clusterResource.ID()] = rds.Cluster{
Metadata: clusterResource.Metadata(),
BackupRetentionPeriodDays: clusterResource.GetIntProperty("BackupRetentionPeriod", 1),
PerformanceInsights: rds.PerformanceInsights{
Metadata: clusterResource.Metadata(),
Enabled: clusterResource.GetBoolProperty("PerformanceInsightsEnabled"),
KMSKeyID: clusterResource.GetStringProperty("PerformanceInsightsKmsKeyId"),
},
Encryption: rds.Encryption{
Metadata: clusterResource.Metadata(),
EncryptStorage: clusterResource.GetBoolProperty("StorageEncrypted"),
KMSKeyID: clusterResource.GetStringProperty("KmsKeyId"),
},
PublicAccess: defsecTypes.BoolDefault(false, clusterResource.Metadata()),
Engine: clusterResource.GetStringProperty("Engine", rds.EngineAurora),
LatestRestorableTime: defsecTypes.TimeUnresolvable(clusterResource.Metadata()),
DeletionProtection: clusterResource.GetBoolProperty("DeletionProtection"),
}
}
return clusters
}
func getClassic(ctx parser.FileContext) rds.Classic {
return rds.Classic{
DBSecurityGroups: getClassicSecurityGroups(ctx),
}
}
func getClassicSecurityGroups(ctx parser.FileContext) (groups []rds.DBSecurityGroup) {
for _, dbsgResource := range ctx.GetResourcesByType("AWS::RDS::DBSecurityGroup") {
group := rds.DBSecurityGroup{
Metadata: dbsgResource.Metadata(),
}
groups = append(groups, group)
}
return groups
}

View File

@@ -0,0 +1,130 @@
package rds
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/rds"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClustersAndInstances(ctx parser2.FileContext) ([]rds.Cluster, []rds.Instance) {
clusterMap := getClusters(ctx)
var orphans []rds.Instance
for _, r := range ctx.GetResourcesByType("AWS::RDS::DBInstance") {
instance := rds.Instance{
Metadata: r.Metadata(),
BackupRetentionPeriodDays: r.GetIntProperty("BackupRetentionPeriod", 1),
ReplicationSourceARN: r.GetStringProperty("SourceDBInstanceIdentifier"),
PerformanceInsights: rds.PerformanceInsights{
Metadata: r.Metadata(),
Enabled: r.GetBoolProperty("EnablePerformanceInsights"),
KMSKeyID: r.GetStringProperty("PerformanceInsightsKMSKeyId"),
},
Encryption: rds.Encryption{
Metadata: r.Metadata(),
EncryptStorage: r.GetBoolProperty("StorageEncrypted"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
},
PublicAccess: r.GetBoolProperty("PubliclyAccessible", true),
Engine: r.GetStringProperty("Engine"),
IAMAuthEnabled: r.GetBoolProperty("EnableIAMDatabaseAuthentication"),
DeletionProtection: r.GetBoolProperty("DeletionProtection", false),
DBInstanceArn: r.GetStringProperty("DBInstanceArn"),
StorageEncrypted: r.GetBoolProperty("StorageEncrypted", false),
DBInstanceIdentifier: r.GetStringProperty("DBInstanceIdentifier"),
DBParameterGroups: getDBParameterGroups(ctx, r),
TagList: getTagList(r),
EnabledCloudwatchLogsExports: getEnabledCloudwatchLogsExports(r),
EngineVersion: r.GetStringProperty("EngineVersion"),
AutoMinorVersionUpgrade: r.GetBoolProperty("AutoMinorVersionUpgrade"),
MultiAZ: r.GetBoolProperty("MultiAZ"),
PubliclyAccessible: r.GetBoolProperty("PubliclyAccessible"),
LatestRestorableTime: types.TimeUnresolvable(r.Metadata()),
ReadReplicaDBInstanceIdentifiers: getReadReplicaDBInstanceIdentifiers(r),
}
if clusterID := r.GetProperty("DBClusterIdentifier"); clusterID.IsString() {
if cluster, exist := clusterMap[clusterID.AsString()]; exist {
cluster.Instances = append(cluster.Instances, rds.ClusterInstance{
Instance: instance,
ClusterIdentifier: clusterID.AsStringValue(),
})
clusterMap[clusterID.AsString()] = cluster
}
} else {
orphans = append(orphans, instance)
}
}
clusters := make([]rds.Cluster, 0, len(clusterMap))
for _, cluster := range clusterMap {
clusters = append(clusters, cluster)
}
return clusters, orphans
}
func getDBParameterGroups(ctx parser2.FileContext, r *parser2.Resource) (dbParameterGroup []rds.DBParameterGroupsList) {
dbParameterGroupName := r.GetStringProperty("DBParameterGroupName")
for _, r := range ctx.GetResourcesByType("AWS::RDS::DBParameterGroup") {
name := r.GetStringProperty("DBParameterGroupName")
if !dbParameterGroupName.EqualTo(name.Value()) {
continue
}
dbpmgl := rds.DBParameterGroupsList{
Metadata: r.Metadata(),
DBParameterGroupName: name,
KMSKeyID: types.StringUnresolvable(r.Metadata()),
}
dbParameterGroup = append(dbParameterGroup, dbpmgl)
}
return dbParameterGroup
}
func getEnabledCloudwatchLogsExports(r *parser2.Resource) (enabledcloudwatchlogexportslist []types.StringValue) {
enabledCloudwatchLogExportList := r.GetProperty("EnableCloudwatchLogsExports")
if enabledCloudwatchLogExportList.IsNil() || enabledCloudwatchLogExportList.IsNotList() {
return enabledcloudwatchlogexportslist
}
for _, ecle := range enabledCloudwatchLogExportList.AsList() {
enabledcloudwatchlogexportslist = append(enabledcloudwatchlogexportslist, ecle.AsStringValue())
}
return enabledcloudwatchlogexportslist
}
func getTagList(r *parser2.Resource) (taglist []rds.TagList) {
tagLists := r.GetProperty("Tags")
if tagLists.IsNil() || tagLists.IsNotList() {
return taglist
}
for _, tl := range tagLists.AsList() {
taglist = append(taglist, rds.TagList{
Metadata: tl.Metadata(),
})
}
return taglist
}
func getReadReplicaDBInstanceIdentifiers(r *parser2.Resource) (readreplicadbidentifier []types.StringValue) {
readReplicaDBIdentifier := r.GetProperty("SourceDBInstanceIdentifier")
if readReplicaDBIdentifier.IsNil() || readReplicaDBIdentifier.IsNotList() {
return readreplicadbidentifier
}
for _, rr := range readReplicaDBIdentifier.AsList() {
readreplicadbidentifier = append(readreplicadbidentifier, rr.AsStringValue())
}
return readreplicadbidentifier
}

View File

@@ -0,0 +1,42 @@
package rds
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/rds"
"github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getParameterGroups(ctx parser2.FileContext) (parametergroups []rds.ParameterGroups) {
for _, r := range ctx.GetResourcesByType("AWS::RDS::DBParameterGroup") {
paramgroup := rds.ParameterGroups{
Metadata: r.Metadata(),
DBParameterGroupName: r.GetStringProperty("DBParameterGroupName"),
DBParameterGroupFamily: r.GetStringProperty("DBParameterGroupFamily"),
Parameters: getParameters(r),
}
parametergroups = append(parametergroups, paramgroup)
}
return parametergroups
}
func getParameters(r *parser2.Resource) (parameters []rds.Parameters) {
dBParam := r.GetProperty("Parameters")
if dBParam.IsNil() || dBParam.IsNotList() {
return parameters
}
for _, dbp := range dBParam.AsList() {
parameters = append(parameters, rds.Parameters{
Metadata: dbp.Metadata(),
ParameterName: types.StringDefault("", dbp.Metadata()),
ParameterValue: types.StringDefault("", dbp.Metadata()),
})
}
return parameters
}

View File

@@ -0,0 +1,18 @@
package rds
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/rds"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts RDS resources (clusters, instances, parameter groups and classic security groups) from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) rds.RDS {
clusters, orphans := getClustersAndInstances(cfFile)
return rds.RDS{
Instances: orphans,
Clusters: clusters,
Classic: getClassic(cfFile),
ParameterGroups: getParameterGroups(cfFile),
Snapshots: nil,
}
}
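
Complementing the RDS test earlier in this diff, which covers a cluster-attached instance, here is an illustrative sketch (not part of the commit) of the other branch in getClustersAndInstances: an instance with no DBClusterIdentifier is returned as an orphan under Instances. Template and assertions are assumptions.

package rds

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestOrphanInstanceSketch(t *testing.T) {
	// Hypothetical template: a standalone encrypted instance with no DBClusterIdentifier.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Instance:
    Type: AWS::RDS::DBInstance
    Properties:
      Engine: postgres
      PubliclyAccessible: false
      StorageEncrypted: true
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Empty(t, adapted.Clusters)
	require.Len(t, adapted.Instances, 1)
	require.True(t, adapted.Instances[0].Encryption.EncryptStorage.IsTrue())
}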

View File

@@ -0,0 +1,54 @@
package redshift
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/redshift"
"github.com/aquasecurity/defsec/pkg/types"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getClusters(ctx parser.FileContext) (clusters []redshift.Cluster) {
for _, r := range ctx.GetResourcesByType("AWS::Redshift::Cluster") {
cluster := redshift.Cluster{
Metadata: r.Metadata(),
ClusterIdentifier: r.GetStringProperty("ClusterIdentifier"),
AllowVersionUpgrade: r.GetBoolProperty("AllowVersionUpgrade"),
NodeType: r.GetStringProperty("NodeType"),
NumberOfNodes: r.GetIntProperty("NumberOfNodes"),
PubliclyAccessible: r.GetBoolProperty("PubliclyAccessible"),
MasterUsername: r.GetStringProperty("MasterUsername"),
VpcId: types.String("", r.Metadata()),
LoggingEnabled: types.Bool(false, r.Metadata()),
AutomatedSnapshotRetentionPeriod: r.GetIntProperty("AutomatedSnapshotRetentionPeriod"),
Encryption: redshift.Encryption{
Metadata: r.Metadata(),
Enabled: r.GetBoolProperty("Encrypted"),
KMSKeyID: r.GetStringProperty("KmsKeyId"),
},
EndPoint: redshift.EndPoint{
Metadata: r.Metadata(),
Port: r.GetIntProperty("Endpoint.Port"),
},
SubnetGroupName: r.GetStringProperty("ClusterSubnetGroupName", ""),
}
clusters = append(clusters, cluster)
}
return clusters
}
func getParameters(ctx parser.FileContext) (parameters []redshift.ClusterParameter) {
	paraRes := ctx.GetResourcesByType("AWS::Redshift::ClusterParameterGroup")
for _, r := range paraRes {
for _, par := range r.GetProperty("Parameters").AsList() {
parameters = append(parameters, redshift.ClusterParameter{
Metadata: par.Metadata(),
ParameterName: par.GetStringProperty("ParameterName"),
ParameterValue: par.GetStringProperty("ParameterValue"),
})
}
}
return parameters
}

View File

@@ -0,0 +1,16 @@
package redshift
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/redshift"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts Redshift cluster, security group and parameter resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) redshift.Redshift {
return redshift.Redshift{
Clusters: getClusters(cfFile),
SecurityGroups: getSecurityGroups(cfFile),
ClusterParameters: getParameters(cfFile),
ReservedNodes: nil,
}
}

View File

@@ -0,0 +1,17 @@
package redshift
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/redshift"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getSecurityGroups(ctx parser.FileContext) (groups []redshift.SecurityGroup) {
for _, groupResource := range ctx.GetResourcesByType("AWS::Redshift::ClusterSecurityGroup") {
group := redshift.SecurityGroup{
Metadata: groupResource.Metadata(),
Description: groupResource.GetProperty("Description").AsStringValue(),
}
groups = append(groups, group)
}
return groups
}
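
Illustrative sketch, not part of the diff, combining the cluster, security group and Adapt pieces above; the template and expected values are assumptions.

package redshift

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestAdaptClusterSketch(t *testing.T) {
	// Hypothetical template: an encrypted cluster plus a classic cluster security group.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Cluster:
    Type: AWS::Redshift::Cluster
    Properties:
      NodeType: dc2.large
      MasterUsername: admin
      MasterUserPassword: examplepassword
      Encrypted: true
      KmsKeyId: alias/my-key
      AutomatedSnapshotRetentionPeriod: 7
  SecGroup:
    Type: AWS::Redshift::ClusterSecurityGroup
    Properties:
      Description: legacy security group
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Clusters, 1)
	require.True(t, adapted.Clusters[0].Encryption.Enabled.IsTrue())
	require.Equal(t, "alias/my-key", adapted.Clusters[0].Encryption.KMSKeyID.Value())
	require.Len(t, adapted.SecurityGroups, 1)
	require.Equal(t, "legacy security group", adapted.SecurityGroups[0].Description.Value())
}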

View File

@@ -0,0 +1,147 @@
package s3
import (
"regexp"
"strings"
"github.com/aquasecurity/defsec/pkg/providers/aws/s3"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
var aclConvertRegex = regexp.MustCompile(`[A-Z][^A-Z]*`)
func getBuckets(cfFile parser2.FileContext) []s3.Bucket {
var buckets []s3.Bucket
bucketResources := cfFile.GetResourcesByType("AWS::S3::Bucket")
for _, r := range bucketResources {
s3b := s3.Bucket{
Metadata: r.Metadata(),
Name: r.GetStringProperty("BucketName"),
PublicAccessBlock: getPublicAccessBlock(r),
Encryption: getEncryption(r, cfFile),
Versioning: s3.Versioning{
Metadata: r.Metadata(),
Enabled: hasVersioning(r),
MFADelete: defsecTypes.BoolUnresolvable(r.Metadata()),
},
Logging: getLogging(r),
ACL: convertAclValue(r.GetStringProperty("AccessControl", "private")),
LifecycleConfiguration: getLifecycle(r),
AccelerateConfigurationStatus: r.GetStringProperty("AccelerateConfiguration.AccelerationStatus"),
Website: getWebsite(r),
BucketLocation: defsecTypes.String("", r.Metadata()),
Objects: nil,
}
buckets = append(buckets, s3b)
}
return buckets
}
func getPublicAccessBlock(r *parser2.Resource) *s3.PublicAccessBlock {
if block := r.GetProperty("PublicAccessBlockConfiguration"); block.IsNil() {
return nil
}
return &s3.PublicAccessBlock{
Metadata: r.Metadata(),
BlockPublicACLs: r.GetBoolProperty("PublicAccessBlockConfiguration.BlockPublicAcls"),
BlockPublicPolicy: r.GetBoolProperty("PublicAccessBlockConfiguration.BlockPublicPolicy"),
IgnorePublicACLs: r.GetBoolProperty("PublicAccessBlockConfiguration.IgnorePublicAcls"),
RestrictPublicBuckets: r.GetBoolProperty("PublicAccessBlockConfiguration.RestrictPublicBuckets"),
}
}
func convertAclValue(aclValue defsecTypes.StringValue) defsecTypes.StringValue {
matches := aclConvertRegex.FindAllString(aclValue.Value(), -1)
return defsecTypes.String(strings.ToLower(strings.Join(matches, "-")), aclValue.GetMetadata())
}
func getLogging(r *parser2.Resource) s3.Logging {
logging := s3.Logging{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
TargetBucket: defsecTypes.StringDefault("", r.Metadata()),
}
if config := r.GetProperty("LoggingConfiguration"); config.IsNotNil() {
logging.TargetBucket = config.GetStringProperty("DestinationBucketName")
if logging.TargetBucket.IsNotEmpty() || !logging.TargetBucket.GetMetadata().IsResolvable() {
logging.Enabled = defsecTypes.Bool(true, config.Metadata())
}
}
return logging
}
func hasVersioning(r *parser2.Resource) defsecTypes.BoolValue {
versioningProp := r.GetProperty("VersioningConfiguration.Status")
if versioningProp.IsNil() {
return defsecTypes.BoolDefault(false, r.Metadata())
}
versioningEnabled := false
if versioningProp.EqualTo("Enabled") {
versioningEnabled = true
}
return defsecTypes.Bool(versioningEnabled, versioningProp.Metadata())
}
func getEncryption(r *parser2.Resource, _ parser2.FileContext) s3.Encryption {
encryption := s3.Encryption{
Metadata: r.Metadata(),
Enabled: defsecTypes.BoolDefault(false, r.Metadata()),
Algorithm: defsecTypes.StringDefault("", r.Metadata()),
KMSKeyId: defsecTypes.StringDefault("", r.Metadata()),
}
if encryptProps := r.GetProperty("BucketEncryption.ServerSideEncryptionConfiguration"); encryptProps.IsNotNil() {
for _, rule := range encryptProps.AsList() {
if algo := rule.GetProperty("ServerSideEncryptionByDefault.SSEAlgorithm"); algo.EqualTo("AES256") {
encryption.Enabled = defsecTypes.Bool(true, algo.Metadata())
} else if kmsKeyProp := rule.GetProperty("ServerSideEncryptionByDefault.KMSMasterKeyID"); !kmsKeyProp.IsEmpty() && kmsKeyProp.IsString() {
encryption.KMSKeyId = kmsKeyProp.AsStringValue()
}
if encryption.Enabled.IsFalse() {
encryption.Enabled = rule.GetBoolProperty("BucketKeyEnabled", false)
}
}
}
return encryption
}
func getLifecycle(resource *parser2.Resource) []s3.Rules {
	lifecycleProp := resource.GetProperty("LifecycleConfiguration")
	ruleProp := lifecycleProp.GetProperty("Rules")
	var rule []s3.Rules
	if ruleProp.IsNil() || ruleProp.IsNotList() {
		return rule
	}
	for _, r := range ruleProp.AsList() {
rule = append(rule, s3.Rules{
Metadata: r.Metadata(),
Status: r.GetStringProperty("Status"),
})
}
return rule
}
func getWebsite(r *parser2.Resource) *s3.Website {
if block := r.GetProperty("WebsiteConfiguration"); block.IsNil() {
return nil
} else {
return &s3.Website{
Metadata: block.Metadata(),
}
}
}

View File

@@ -0,0 +1,13 @@
package s3
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/s3"
"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
// Adapt adapts S3 bucket resources from the parsed CloudFormation template
func Adapt(cfFile parser.FileContext) s3.S3 {
return s3.S3{
Buckets: getBuckets(cfFile),
}
}
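
Illustrative sketch, not part of the diff, of the ACL and versioning handling above: convertAclValue turns the CamelCase AccessControl value into its hyphenated lowercase form, and hasVersioning reads VersioningConfiguration.Status. Template and assertions are assumptions.

package s3

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestBucketACLAndVersioningSketch(t *testing.T) {
	// Hypothetical template: a public-read bucket with versioning enabled.
	src := `AWSTemplateFormatVersion: 2010-09-09
Resources:
  Bucket:
    Type: AWS::S3::Bucket
    Properties:
      BucketName: my-bucket
      AccessControl: PublicRead
      VersioningConfiguration:
        Status: Enabled
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	adapted := Adapt(*fctx)
	require.Len(t, adapted.Buckets, 1)
	// convertAclValue: "PublicRead" -> "public-read".
	require.Equal(t, "public-read", adapted.Buckets[0].ACL.Value())
	require.True(t, adapted.Buckets[0].Versioning.Enabled.IsTrue())
}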

View File

@@ -0,0 +1,96 @@
package sam
import (
"github.com/aquasecurity/defsec/pkg/providers/aws/sam"
defsecTypes "github.com/aquasecurity/defsec/pkg/types"
parser2 "github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
)
func getApis(cfFile parser2.FileContext) (apis []sam.API) {
apiResources := cfFile.GetResourcesByType("AWS::Serverless::Api")
for _, r := range apiResources {
api := sam.API{
Metadata: r.Metadata(),
Name: r.GetStringProperty("Name", ""),
TracingEnabled: r.GetBoolProperty("TracingEnabled"),
DomainConfiguration: getDomainConfiguration(r),
AccessLogging: getAccessLogging(r),
RESTMethodSettings: getRestMethodSettings(r),
}
apis = append(apis, api)
}
return apis
}
func getRestMethodSettings(r *parser2.Resource) sam.RESTMethodSettings {
settings := sam.RESTMethodSettings{
Metadata: r.Metadata(),
CacheDataEncrypted: defsecTypes.BoolDefault(false, r.Metadata()),
LoggingEnabled: defsecTypes.BoolDefault(false, r.Metadata()),
DataTraceEnabled: defsecTypes.BoolDefault(false, r.Metadata()),
MetricsEnabled: defsecTypes.BoolDefault(false, r.Metadata()),
}
settingsProp := r.GetProperty("MethodSettings")
if settingsProp.IsNotNil() {
settings = sam.RESTMethodSettings{
Metadata: settingsProp.Metadata(),
CacheDataEncrypted: settingsProp.GetBoolProperty("CacheDataEncrypted"),
LoggingEnabled: defsecTypes.BoolDefault(false, settingsProp.Metadata()),
DataTraceEnabled: settingsProp.GetBoolProperty("DataTraceEnabled"),
MetricsEnabled: settingsProp.GetBoolProperty("MetricsEnabled"),
}
if loggingLevel := settingsProp.GetProperty("LoggingLevel"); loggingLevel.IsNotNil() {
if loggingLevel.EqualTo("OFF", parser2.IgnoreCase) {
settings.LoggingEnabled = defsecTypes.Bool(false, loggingLevel.Metadata())
} else {
settings.LoggingEnabled = defsecTypes.Bool(true, loggingLevel.Metadata())
}
}
}
return settings
}
func getAccessLogging(r *parser2.Resource) sam.AccessLogging {
logging := sam.AccessLogging{
Metadata: r.Metadata(),
CloudwatchLogGroupARN: defsecTypes.StringDefault("", r.Metadata()),
}
if access := r.GetProperty("AccessLogSetting"); access.IsNotNil() {
logging = sam.AccessLogging{
Metadata: access.Metadata(),
CloudwatchLogGroupARN: access.GetStringProperty("DestinationArn", ""),
}
}
return logging
}
func getDomainConfiguration(r *parser2.Resource) sam.DomainConfiguration {
domainConfig := sam.DomainConfiguration{
Metadata: r.Metadata(),
Name: defsecTypes.StringDefault("", r.Metadata()),
SecurityPolicy: defsecTypes.StringDefault("TLS_1_0", r.Metadata()),
}
if domain := r.GetProperty("Domain"); domain.IsNotNil() {
domainConfig = sam.DomainConfiguration{
Metadata: domain.Metadata(),
Name: domain.GetStringProperty("DomainName", ""),
SecurityPolicy: domain.GetStringProperty("SecurityPolicy", "TLS_1_0"),
}
}
return domainConfig
}
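
Illustrative sketch, not part of the diff: since the SAM Adapt entry point is outside this excerpt, getApis is called directly to show the Domain handling above. The template and expected values are assumptions.

package sam

import (
	"context"
	"testing"

	"github.com/aquasecurity/trivy/internal/testutil"
	"github.com/aquasecurity/trivy/pkg/iac/scanners/cloudformation/parser"
	"github.com/stretchr/testify/require"
)

func TestGetApisDomainSketch(t *testing.T) {
	// Hypothetical SAM template: an API with a custom domain and a TLS 1.2 security policy.
	src := `AWSTemplateFormatVersion: 2010-09-09
Transform: AWS::Serverless-2016-10-31
Resources:
  Api:
    Type: AWS::Serverless::Api
    Properties:
      Name: my-api
      StageName: prod
      TracingEnabled: true
      Domain:
        DomainName: api.example.com
        SecurityPolicy: TLS_1_2
`
	fs := testutil.CreateFS(t, map[string]string{"template.yaml": src})
	p := parser.New()
	fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml")
	require.NoError(t, err)

	apis := getApis(*fctx)
	require.Len(t, apis, 1)
	require.Equal(t, "my-api", apis[0].Name.Value())
	require.True(t, apis[0].TracingEnabled.IsTrue())
	require.Equal(t, "TLS_1_2", apis[0].DomainConfiguration.SecurityPolicy.Value())
}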

Some files were not shown because too many files have changed in this diff.