diff --git a/Makefile b/Makefile index 374664d2..8b737d74 100644 --- a/Makefile +++ b/Makefile @@ -115,11 +115,10 @@ test: test/go test/lint test/go: cnspec/generate test/go/plain test/go/plain: - # TODO /motor/docker/docker_engine cannot be executed inside of docker - go test -cover $(shell go list ./... | grep -v '/motor/discovery/docker_engine') + go test -cover $(shell go list ./...) test/go/plain-ci: prep/tools - gotestsum --junitfile report.xml --format pkgname -- -cover $(shell go list ./... | grep -v '/vendor/' | grep -v '/motor/discovery/docker_engine') + gotestsum --junitfile report.xml --format pkgname -- -cover $(shell go list ./... | grep -v '/vendor/') .PHONY: test/lint/staticcheck test/lint/staticcheck: diff --git a/test/bundles.go b/test/bundles.go new file mode 100644 index 00000000..fb83acc4 --- /dev/null +++ b/test/bundles.go @@ -0,0 +1,58 @@ +// Copyright (c) Mondoo, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package test + +import ( + "context" + "go.mondoo.com/cnquery/v9/logger" + "go.mondoo.com/cnquery/v9/providers" + "go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory" + "go.mondoo.com/cnspec/v9/policy" + "go.mondoo.com/cnspec/v9/policy/scan" +) + +func init() { + logger.Set("info") +} + +func runBundle(policyBundlePath string, policyMrn string, asset *inventory.Asset) (*policy.Report, error) { + ctx := context.Background() + policyBundle, err := policy.BundleFromPaths(policyBundlePath) + if err != nil { + return nil, err + } + + policyBundle.OwnerMrn = "//policy.api.mondoo.app" + var results *policy.Report + + policyFilters := []string{} + if policyMrn != "" { + policyFilters = append(policyFilters, policyMrn) + } + + scanner := scan.NewLocalScanner(scan.WithRecording(providers.NullRecording{})) // TODO: fix recording + result, err := scanner.RunIncognito(ctx, &scan.Job{ + Inventory: &inventory.Inventory{ + Spec: &inventory.InventorySpec{ + Assets: []*inventory.Asset{asset}, + }, + }, + Bundle: policyBundle, + PolicyFilters: policyFilters, + ReportType: scan.ReportType_FULL, + }) + if err != nil { + return nil, err + } + + reports := result.GetFull().Reports + if len(reports) > 0 { + for _, report := range reports { + results = report + break + } + } + + return results, err +} diff --git a/test/k8s_test.go b/test/k8s_test.go new file mode 100644 index 00000000..79e280be --- /dev/null +++ b/test/k8s_test.go @@ -0,0 +1,60 @@ +// Copyright (c) Mondoo, Inc. 
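+//
+// Integration tests: these scan the Kubernetes manifests under testdata/k8s
+// with the mondoo-kubernetes-security bundle (via the runBundle helper in
+// bundles.go) and assert the resulting policy score.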
+// SPDX-License-Identifier: BUSL-1.1
+
+package test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.mondoo.com/cnquery/v9/providers"
+    "go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
+)
+
+func TestKubernetesBundles(t *testing.T) {
+    providers.EnsureProvider(providers.ProviderLookup{ID: "go.mondoo.com/cnquery/v9/providers/k8s"}, true, nil)
+
+    type TestCase struct {
+        bundleFile string
+        testDir    string
+        policyMrn  string
+        score      uint32
+    }
+
+    tests := []TestCase{
+        {
+            bundleFile: "./testdata/mondoo-kubernetes-security.mql.yaml",
+            testDir:    "./testdata/k8s/pass/pod.yaml",
+            policyMrn:  "//policy.api.mondoo.app/policies/mondoo-kubernetes-security",
+            score:      100,
+        },
+        {
+            bundleFile: "./testdata/mondoo-kubernetes-security.mql.yaml",
+            testDir:    "./testdata/k8s/fail/pod-nonroot.yaml",
+            policyMrn:  "//policy.api.mondoo.app/policies/mondoo-kubernetes-security",
+            score:      0,
+        },
+    }
+
+    for i := range tests {
+        test := tests[i]
+        t.Run(test.testDir, func(t *testing.T) {
+            report, err := runBundle(test.bundleFile, test.policyMrn, &inventory.Asset{
+                Connections: []*inventory.Config{{
+                    Type: "k8s",
+                    Options: map[string]string{
+                        "path": test.testDir,
+                    },
+                    Discover: &inventory.Discovery{
+                        Targets: []string{"pods"}, // only discover pods; the bare manifest asset does not return anything
+                    },
+                }},
+            })
+            require.NoError(t, err)
+
+            score := report.Scores[test.policyMrn]
+            assert.Equal(t, test.score, score.Value)
+        })
+    }
+}
diff --git a/test/terraform_test.go b/test/terraform_test.go
new file mode 100644
index 00000000..c557f468
--- /dev/null
+++ b/test/terraform_test.go
@@ -0,0 +1,80 @@
+// Copyright (c) Mondoo, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "go.mondoo.com/cnquery/v9/providers"
+    "go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
+)
+
+func TestTerraformBundles(t *testing.T) {
+    providers.EnsureProvider(providers.ProviderLookup{ID: "go.mondoo.com/cnquery/v9/providers/terraform"}, true, nil)
+    type TestCase struct {
+        bundleFile string
+        testDir    string
+        policyMrn  string
+        score      uint32
+    }
+
+    tests := []TestCase{
+        {
+            bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml",
+            testDir:    "./testdata/terraform/aws-3.xx/pass",
+            policyMrn:  "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security",
+            score:      100,
+        }, {
+            bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml",
+            testDir:    "./testdata/terraform/aws-3.xx/fail",
+            policyMrn:  "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security",
+            // NOTE: terraform-aws-security-s3-bucket-level-public-access-prohibited is not correctly implemented yet and still needs to be reworked.
+            // 3/28/2022 - Tests are passing now but not for the right reasons. We still need to revisit this query since it involves testing
+            // whether configuration was applied to a specific bucket.
+ score: 0, + }, { + bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml", + testDir: "./testdata/terraform/aws-4.xx/pass", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security", + score: 100, + }, { + bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml", + testDir: "./testdata/terraform/aws-4.xx/fail", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security", + score: 0, + }, { + bundleFile: "./testdata/mondoo-terraform-gcp-security.mql.yaml", + testDir: "./testdata/terraform/gcp/pass", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-gcp-security", + score: 100, + }, { + bundleFile: "./testdata/mondoo-terraform-gcp-security.mql.yaml", + testDir: "./testdata/terraform/gcp/fail", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-gcp-security", + score: 0, + }, + } + + for i := range tests { + test := tests[i] + t.Run(test.testDir, func(t *testing.T) { + report, err := runBundle(test.bundleFile, test.policyMrn, &inventory.Asset{ + Connections: []*inventory.Config{ + { + Type: "terraform-hcl", + Options: map[string]string{ + "path": test.testDir, + }, + }, + }, + }) + require.NoError(t, err) + + score := report.Scores[test.policyMrn] + assert.Equal(t, test.score, score.Value) + }) + } +} diff --git a/test/testdata/k8s/fail/daemonset.yaml b/test/testdata/k8s/fail/daemonset.yaml new file mode 100644 index 00000000..88b39d21 --- /dev/null +++ b/test/testdata/k8s/fail/daemonset.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-elasticsearch + namespace: kube-system + labels: + k8s-app: fluentd-logging +spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + serviceAccount: default + serviceAccountName: default + servicautomountServiceAccountTokeneAccount: true + hostNetwork: true + hostPID: true + hostIPC: true + hostAliases: + - ip: 127.0.0.1 + hostnames: + - myadded.examplehostname + tolerations: + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 + imagePullPolicy: Always + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + securityContext: + privileged: true + readOnlyRootFilesystem: false + runAsNonRoot: false + runAsUser: 0 + allowPrivilegeEscalation: true + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers diff --git a/test/testdata/k8s/fail/pod-nonroot.yaml b/test/testdata/k8s/fail/pod-nonroot.yaml new file mode 100644 index 00000000..865e77f0 --- /dev/null +++ b/test/testdata/k8s/fail/pod-nonroot.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-pod + namespace: default +spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + securityContext: + runAsNonRoot: true + containers: + - name: nginx + image: nginx:1.21.6 + ports: + - containerPort: 8080 + imagePullPolicy: Always + securityContext: + 
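+        # Intentional finding: this container-level runAsNonRoot: false overrides the
+        # pod-level runAsNonRoot: true, so the test expects this manifest to score 0.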
runAsNonRoot: false + privileged: false + readOnlyRootFilesystem: true + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 \ No newline at end of file diff --git a/test/testdata/k8s/pass/batch.yaml b/test/testdata/k8s/pass/batch.yaml new file mode 100644 index 00000000..3a154abb --- /dev/null +++ b/test/testdata/k8s/pass/batch.yaml @@ -0,0 +1,49 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: hello-batch + namespace: sample +spec: + template: + spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + containers: + - name: hello + image: busybox:v3 + imagePullPolicy: Always + command: [ 'sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600' ] + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + restartPolicy: OnFailure \ No newline at end of file diff --git a/test/testdata/k8s/pass/daemonset.yaml b/test/testdata/k8s/pass/daemonset.yaml new file mode 100644 index 00000000..3c627949 --- /dev/null +++ b/test/testdata/k8s/pass/daemonset.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: hello-daemonset + namespace: sample +spec: + selector: + matchLabels: + name: hello-daemonset + template: + metadata: + labels: + name: hello-daemonset + spec: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 + imagePullPolicy: Always + securityContext: + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers \ No newline at end of file diff --git a/test/testdata/k8s/pass/deployment.yaml b/test/testdata/k8s/pass/deployment.yaml new file mode 100644 index 00000000..d232579b --- /dev/null +++ b/test/testdata/k8s/pass/deployment.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-deployment + namespace: sample +spec: + replicas: 3 + selector: + matchLabels: + app: hello-deployment + template: + metadata: + labels: + app: hello-deployment + spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + containers: + - 
name: nginx + image: nginx:1.21.6 + imagePullPolicy: Always + ports: + - containerPort: 8080 + securityContext: + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 \ No newline at end of file diff --git a/test/testdata/k8s/pass/job.yaml b/test/testdata/k8s/pass/job.yaml new file mode 100644 index 00000000..46d141d7 --- /dev/null +++ b/test/testdata/k8s/pass/job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: hello-job + namespace: sample +spec: + template: + spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + containers: + - name: hello-job + image: perl + imagePullPolicy: Always + command: [ "perl", "-Mbignum=bpi", "-wle", "print bpi(2000)" ] + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + restartPolicy: Never + backoffLimit: 4 \ No newline at end of file diff --git a/test/testdata/k8s/pass/networkpolicy.yaml b/test/testdata/k8s/pass/networkpolicy.yaml new file mode 100644 index 00000000..76a2baed --- /dev/null +++ b/test/testdata/k8s/pass/networkpolicy.yaml @@ -0,0 +1,34 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: hello-network-policy + namespace: sample +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Ingress + - Egress + ingress: + - from: + - ipBlock: + cidr: 172.17.0.0/16 + except: + - 172.17.1.0/24 + - namespaceSelector: + matchLabels: + project: myproject + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 5978 \ No newline at end of file diff --git a/test/testdata/k8s/pass/pod-nonroot.yaml b/test/testdata/k8s/pass/pod-nonroot.yaml new file mode 100644 index 00000000..862dd3d9 --- /dev/null +++ b/test/testdata/k8s/pass/pod-nonroot.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-pod + namespace: default +spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + securityContext: + runAsNonRoot: true + containers: + - name: nginx + image: nginx:1.21.6 + ports: + - containerPort: 8080 + imagePullPolicy: Always + securityContext: + privileged: false + readOnlyRootFilesystem: true + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 \ No newline at end of file diff --git a/test/testdata/k8s/pass/pod.yaml b/test/testdata/k8s/pass/pod.yaml new file mode 100644 index 00000000..1be2d3ae --- /dev/null +++ b/test/testdata/k8s/pass/pod.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-pod + namespace: default +spec: + serviceAccountName: 
mondoo-test + serviceAccount: mondoo-test + containers: + - name: nginx + image: nginx:1.21.6 + ports: + - containerPort: 8080 + imagePullPolicy: Always + securityContext: + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 20 \ No newline at end of file diff --git a/test/testdata/k8s/pass/replicaset.yaml b/test/testdata/k8s/pass/replicaset.yaml new file mode 100644 index 00000000..92618cd8 --- /dev/null +++ b/test/testdata/k8s/pass/replicaset.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: hello-replicaset + namespace: sample +spec: + replicas: 3 + selector: + matchLabels: + tier: hello-replicaset + template: + metadata: + labels: + tier: hello-replicaset + spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + containers: + - name: php-redis + image: gcr.io/google_samples/gb-frontend:v3 + imagePullPolicy: Always + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 \ No newline at end of file diff --git a/test/testdata/k8s/pass/service.yaml b/test/testdata/k8s/pass/service.yaml new file mode 100644 index 00000000..4b62e448 --- /dev/null +++ b/test/testdata/k8s/pass/service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: hello-service + namespace: sample +spec: + selector: + app: sample-app + ports: + - protocol: TCP + port: 80 + targetPort: 8989 \ No newline at end of file diff --git a/test/testdata/k8s/pass/statefulset.yaml b/test/testdata/k8s/pass/statefulset.yaml new file mode 100644 index 00000000..c146377f --- /dev/null +++ b/test/testdata/k8s/pass/statefulset.yaml @@ -0,0 +1,87 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + namespace: sample + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web + namespace: sample +spec: + selector: + matchLabels: + app: nginx # has to match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # by default is 1 + minReadySeconds: 10 # by default is 0 + template: + metadata: + labels: + app: nginx # has to match .spec.selector.matchLabels + spec: + serviceAccountName: mondoo-test + serviceAccount: mondoo-test + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + imagePullPolicy: Always + ports: + - containerPort: 80 + name: web + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - exit 0 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + 
memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/test/testdata/mondoo-kubernetes-security.mql.yaml b/test/testdata/mondoo-kubernetes-security.mql.yaml new file mode 100644 index 00000000..cea4a7b1 --- /dev/null +++ b/test/testdata/mondoo-kubernetes-security.mql.yaml @@ -0,0 +1,6454 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-kubernetes-security + name: Kubernetes Cluster and Workload Security + version: 1.1.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: linux,kubernetes,k8s + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: |- + # Overview + + The Kubernetes Cluster and Workload Security by Mondoo provides guidance for establishing secure Kubernetes cluster configurations and workload deployments. + + If you have questions, comments, or have identified ways to improve this policy, please write us at hello@mondoo.com, or reach out in [GitHub Discussions](https://github.com/orgs/mondoohq/discussions). + + ## Remote scan + + Remote scans use native transports in cnspec to provide on demand scan results without the need to install any agents, or integration. + + For a complete list of native transports run: + + ```bash + cnspec scan --help + ``` + + ### Prerequisites + + Remote scans of Kubernetes clusters requires a `KUBECONFIG` with access to the cluster you want to scan. + + ### Scan a Kubernetes cluster + + Open a terminal and configure an environment variable with the path to your `KUBECONFIG`: + + ```bash + export KUBECONFIG=/path/to/kubeconfig + ``` + + Run a scan of the Kubernetes cluster: + + ```bash + cnspec scan k8s + ``` + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions. 
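+    # Each group below scopes its checks with an MQL `filters` expression, so the
+    # checks only run against matching assets (for example asset.platform == "k8s-pod").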
+ groups: + - title: Kubernetes API Server + filters: | + asset.family.contains(_ == 'linux') + processes.where( executable == /kube-apiserver/ ).list != [] + checks: + - uid: mondoo-kubernetes-security-api-server-no-anonymous-auth + - uid: mondoo-kubernetes-security-https-api-server + - uid: mondoo-kubernetes-security-secure-admin-conf + - uid: mondoo-kubernetes-security-secure-controller-manager_conf + - uid: mondoo-kubernetes-security-secure-etcd-data-dir + - uid: mondoo-kubernetes-security-secure-kube-apiserver-yml + - uid: mondoo-kubernetes-security-secure-pki-directory + - uid: mondoo-kubernetes-security-secure-scheduler_conf + - title: Kubernetes kubelet + filters: | + asset.family.contains(_ == 'linux') + processes.where( executable == /kubelet/ ).list != [] + checks: + - uid: mondoo-kubernetes-security-kubelet-anonymous-authentication + - uid: mondoo-kubernetes-security-kubelet-authorization-mode + - uid: mondoo-kubernetes-security-kubelet-event-record-qps + - uid: mondoo-kubernetes-security-kubelet-iptables-util-chains + - uid: mondoo-kubernetes-security-kubelet-protect-kernel-defaults + - uid: mondoo-kubernetes-security-kubelet-read-only-port + - uid: mondoo-kubernetes-security-kubelet-rotate-certificates + - uid: mondoo-kubernetes-security-kubelet-strong-ciphers + - uid: mondoo-kubernetes-security-kubelet-tls-certificate + - uid: mondoo-kubernetes-security-secure-kubelet-cert-authorities + - uid: mondoo-kubernetes-security-secure-kubelet-config + - title: Kubernetes CronJobs Security + filters: asset.platform == "k8s-cronjob" + checks: + - uid: mondoo-kubernetes-security-cronjob-allowprivilegeescalation + - uid: mondoo-kubernetes-security-cronjob-capability-net-raw + - uid: mondoo-kubernetes-security-cronjob-capability-sys-admin + - uid: mondoo-kubernetes-security-cronjob-containerd-socket + - uid: mondoo-kubernetes-security-cronjob-crio-socket + - uid: mondoo-kubernetes-security-cronjob-docker-socket + - uid: mondoo-kubernetes-security-cronjob-hostipc + - uid: mondoo-kubernetes-security-cronjob-hostnetwork + - uid: mondoo-kubernetes-security-cronjob-hostpath-readonly + - uid: mondoo-kubernetes-security-cronjob-hostpid + - uid: mondoo-kubernetes-security-cronjob-imagepull + - uid: mondoo-kubernetes-security-cronjob-limitcpu + - uid: mondoo-kubernetes-security-cronjob-limitmemory + - uid: mondoo-kubernetes-security-cronjob-ports-hostport + - uid: mondoo-kubernetes-security-cronjob-privilegedcontainer + - uid: mondoo-kubernetes-security-cronjob-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-cronjob-runasnonroot + - uid: mondoo-kubernetes-security-cronjob-serviceaccount + - title: Kubernetes StatefulSets Security + filters: asset.platform == "k8s-statefulset" + checks: + - uid: mondoo-kubernetes-security-statefulset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-statefulset-capability-net-raw + - uid: mondoo-kubernetes-security-statefulset-capability-sys-admin + - uid: mondoo-kubernetes-security-statefulset-containerd-socket + - uid: mondoo-kubernetes-security-statefulset-crio-socket + - uid: mondoo-kubernetes-security-statefulset-docker-socket + - uid: mondoo-kubernetes-security-statefulset-hostipc + - uid: mondoo-kubernetes-security-statefulset-hostnetwork + - uid: mondoo-kubernetes-security-statefulset-hostpath-readonly + - uid: mondoo-kubernetes-security-statefulset-hostpid + - uid: mondoo-kubernetes-security-statefulset-imagepull + - uid: mondoo-kubernetes-security-statefulset-limitcpu + - uid: mondoo-kubernetes-security-statefulset-limitmemory + - 
uid: mondoo-kubernetes-security-statefulset-ports-hostport + - uid: mondoo-kubernetes-security-statefulset-privilegedcontainer + - uid: mondoo-kubernetes-security-statefulset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-statefulset-runasnonroot + - uid: mondoo-kubernetes-security-statefulset-serviceaccount + - title: Kubernetes Deployments Security + filters: asset.platform == "k8s-deployment" + checks: + - uid: mondoo-kubernetes-security-deployment-allowprivilegeescalation + - uid: mondoo-kubernetes-security-deployment-capability-net-raw + - uid: mondoo-kubernetes-security-deployment-capability-sys-admin + - uid: mondoo-kubernetes-security-deployment-containerd-socket + - uid: mondoo-kubernetes-security-deployment-crio-socket + - uid: mondoo-kubernetes-security-deployment-docker-socket + - uid: mondoo-kubernetes-security-deployment-hostipc + - uid: mondoo-kubernetes-security-deployment-hostnetwork + - uid: mondoo-kubernetes-security-deployment-hostpath-readonly + - uid: mondoo-kubernetes-security-deployment-hostpid + - uid: mondoo-kubernetes-security-deployment-imagepull + - uid: mondoo-kubernetes-security-deployment-k8s-dashboard + - uid: mondoo-kubernetes-security-deployment-limitcpu + - uid: mondoo-kubernetes-security-deployment-limitmemory + - uid: mondoo-kubernetes-security-deployment-ports-hostport + - uid: mondoo-kubernetes-security-deployment-privilegedcontainer + - uid: mondoo-kubernetes-security-deployment-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-deployment-runasnonroot + - uid: mondoo-kubernetes-security-deployment-serviceaccount + - uid: mondoo-kubernetes-security-deployment-tiller + - title: Kubernetes Jobs Security + filters: asset.platform == "k8s-job" + checks: + - uid: mondoo-kubernetes-security-job-allowprivilegeescalation + - uid: mondoo-kubernetes-security-job-capability-net-raw + - uid: mondoo-kubernetes-security-job-capability-sys-admin + - uid: mondoo-kubernetes-security-job-containerd-socket + - uid: mondoo-kubernetes-security-job-crio-socket + - uid: mondoo-kubernetes-security-job-docker-socket + - uid: mondoo-kubernetes-security-job-hostipc + - uid: mondoo-kubernetes-security-job-hostnetwork + - uid: mondoo-kubernetes-security-job-hostpath-readonly + - uid: mondoo-kubernetes-security-job-hostpid + - uid: mondoo-kubernetes-security-job-imagepull + - uid: mondoo-kubernetes-security-job-limitcpu + - uid: mondoo-kubernetes-security-job-limitmemory + - uid: mondoo-kubernetes-security-job-ports-hostport + - uid: mondoo-kubernetes-security-job-privilegedcontainer + - uid: mondoo-kubernetes-security-job-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-job-runasnonroot + - uid: mondoo-kubernetes-security-job-serviceaccount + - title: Kubernetes ReplicaSets Security + filters: asset.platform == "k8s-replicaset" + checks: + - uid: mondoo-kubernetes-security-replicaset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-replicaset-capability-net-raw + - uid: mondoo-kubernetes-security-replicaset-capability-sys-admin + - uid: mondoo-kubernetes-security-replicaset-containerd-socket + - uid: mondoo-kubernetes-security-replicaset-crio-socket + - uid: mondoo-kubernetes-security-replicaset-docker-socket + - uid: mondoo-kubernetes-security-replicaset-hostipc + - uid: mondoo-kubernetes-security-replicaset-hostnetwork + - uid: mondoo-kubernetes-security-replicaset-hostpath-readonly + - uid: mondoo-kubernetes-security-replicaset-hostpid + - uid: mondoo-kubernetes-security-replicaset-imagepull + - uid: 
mondoo-kubernetes-security-replicaset-limitcpu + - uid: mondoo-kubernetes-security-replicaset-limitmemory + - uid: mondoo-kubernetes-security-replicaset-ports-hostport + - uid: mondoo-kubernetes-security-replicaset-privilegedcontainer + - uid: mondoo-kubernetes-security-replicaset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-replicaset-runasnonroot + - uid: mondoo-kubernetes-security-replicaset-serviceaccount + - title: Kubernetes DaemonSets Security + filters: asset.platform == "k8s-daemonset" + checks: + - uid: mondoo-kubernetes-security-daemonset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-daemonset-capability-net-raw + - uid: mondoo-kubernetes-security-daemonset-capability-sys-admin + - uid: mondoo-kubernetes-security-daemonset-containerd-socket + - uid: mondoo-kubernetes-security-daemonset-crio-socket + - uid: mondoo-kubernetes-security-daemonset-docker-socket + - uid: mondoo-kubernetes-security-daemonset-hostipc + - uid: mondoo-kubernetes-security-daemonset-hostnetwork + - uid: mondoo-kubernetes-security-daemonset-hostpath-readonly + - uid: mondoo-kubernetes-security-daemonset-hostpid + - uid: mondoo-kubernetes-security-daemonset-imagepull + - uid: mondoo-kubernetes-security-daemonset-limitcpu + - uid: mondoo-kubernetes-security-daemonset-limitmemory + - uid: mondoo-kubernetes-security-daemonset-ports-hostport + - uid: mondoo-kubernetes-security-daemonset-privilegedcontainer + - uid: mondoo-kubernetes-security-daemonset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-daemonset-runasnonroot + - uid: mondoo-kubernetes-security-daemonset-serviceaccount + - title: Kubernetes Pods Security + filters: asset.platform == "k8s-pod" + checks: + - uid: mondoo-kubernetes-security-pod-allowprivilegeescalation + - uid: mondoo-kubernetes-security-pod-capability-net-raw + - uid: mondoo-kubernetes-security-pod-capability-sys-admin + - uid: mondoo-kubernetes-security-pod-containerd-socket + - uid: mondoo-kubernetes-security-pod-crio-socket + - uid: mondoo-kubernetes-security-pod-docker-socket + - uid: mondoo-kubernetes-security-pod-hostipc + - uid: mondoo-kubernetes-security-pod-hostnetwork + - uid: mondoo-kubernetes-security-pod-hostpath-readonly + - uid: mondoo-kubernetes-security-pod-hostpid + - uid: mondoo-kubernetes-security-pod-imagepull + - uid: mondoo-kubernetes-security-pod-k8s-dashboard + - uid: mondoo-kubernetes-security-pod-limitcpu + - uid: mondoo-kubernetes-security-pod-limitmemory + - uid: mondoo-kubernetes-security-pod-ports-hostport + - uid: mondoo-kubernetes-security-pod-privilegedcontainer + - uid: mondoo-kubernetes-security-pod-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-pod-runasnonroot + - uid: mondoo-kubernetes-security-pod-serviceaccount + - uid: mondoo-kubernetes-security-pod-tiller + scoring_system: 2 +props: + - uid: allowedCiphers + title: Define the hardened SSL/ TLS ciphers + mql: | + return ["TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", 
"TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384"] +queries: + - uid: mondoo-kubernetes-security-kubelet-anonymous-authentication + title: Disable anonymous authentication for kubelet + impact: 100 + mql: | + k8s.kubelet.configuration['authentication']['anonymous']['enabled'] == false + docs: + desc: | + Ensure that the kubelet is configured to disable anonymous requests to the kubelet server. + Otherwise the kubelet will allow unauthenticated access to its HTTPS endpoint. Request will have the privileges of the role `system:public-info-viewer`. This might expose data to an attacker. + audit: | + If running the kubelet with the CLI parameter '--anonymous-auth', or running with 'authentication.anonymous.enabled' defined in the kubelet configuration file, ensure that the value is set to 'false'. + remediation: | + Set the '--anonymous-auth' CLI parameter and/or the 'authentication.anonymous.enabled' field in the kubelet configuration file to 'false'. + refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication + title: Kubelet authentication + - uid: mondoo-kubernetes-security-kubelet-event-record-qps + title: Configure kubelet to capture all event creation + impact: 30 + mql: | + k8s.kubelet.configuration['eventRecordQPS'] == 0 + docs: + desc: | + Ensure that the kubelet is configured to capture all event creation so as to avoid potentially not logging important events. + Be aware that this might expose your Cluster to a DoS risk. + audit: | + If running the kubelet with the CLI parameter '--event-qps', or running with 'eventRecordQPS' defined in the kubelet configuration file, ensure that the value is set to '0'. + remediation: | + Set the '--event-qps' CLI parameter and/or the 'eventRecordQPS' field in the kubelet configuration file to '0'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-iptables-util-chains + title: Configure kubelet to ensure IPTables rules are set on host + impact: 30 + mql: | + k8s.kubelet.configuration['makeIPTablesUtilChains'] == true + docs: + desc: | + Ensure that the kubelet is set up to create IPTable utility rules for various kubernetes components. + audit: | + If running the kubelet with the CLI parameter '--make-iptables-util-chains', or running with 'makeIPTablesUtilChains' defined in the kubelet configuration file, ensure that the value is set to 'true'. + remediation: | + Set the '--make-iptables-util-chains' CLI parameter and/or the 'makeIPTablesUtilChains' field in the kubelet configuration file to 'true'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-protect-kernel-defaults + title: Configure kubelet to protect kernel defaults + impact: 60 + mql: | + k8s.kubelet.configuration["protectKernelDefaults"] == "true" + docs: + desc: | + Ensure that the kubelet is set up to error if the underlying kernel tunables are different than the kubelet defaults. By default the kubelet will attempt to modify the kernel as the kubelet starts up. 
+ audit: | + If running the kubelet with the CLI parameter '--protect-kernel-defaults', or running with 'protectKernelDefaults' defined in the kubelet configuration file, ensure that the value is set to 'true'. + remediation: | + Set the '--protect-kernel-defaults' CLI parameter and/or the 'protectKernelDefaults' field in the kubelet configuration file to 'true'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-read-only-port + title: Do not allow unauthenticated read-only port on kubelet + impact: 60 + mql: | + k8s.kubelet.configuration['readOnlyPort'] == 0 || k8s.kubelet.configuration['readOnlyPort'] == null + docs: + desc: | + Ensure the kubelet is not configured to serve up unauthenticated read-only access. + This would expose data to unauthenticated users. + audit: | + If running the kubelet with the CLI parameter '--read-only-port', or running with 'readOnlyPort' defined in the kubelet configuration file, ensure that the value is either '0' or simply not set ('0' is the default). + remediation: | + Set the '--read-only-port' CLI parameter or the 'readOnlyPort' field in the kubelet configuration file to '0'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-authorization-mode + title: Ensure the kubelet is not configured with the AlwaysAllow authorization mode + impact: 100 + mql: | + k8s.kubelet.configuration['authorization']['mode'] != "AlwaysAllow" + docs: + desc: | + Ensure the kubelet is not configured with the AlwaysAllow authorization mode. + It would allow all requests. + audit: | + If running the kubelet with the CLI parameter '--authorization-mode', or running with 'authorization.mode' defined in the kubelet configuration file, ensure that the value is not set to 'AlwaysAllow'. + remediation: | + If the kubelet is configured with the CLI parameter '--authorization-mode', set it to something that isn't 'AlwaysAllow' (eg 'Webhook'). + + If the kubelet is configured via the kubelet config file with the 'authorization.mode' parameter, set it to something that isn't 'AlwaysAllow' (eg. 'Webhook'). 
+ refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization + title: Kubelet authorization + - uid: mondoo-kubernetes-security-kubelet-strong-ciphers + title: Configure kubelet to use only strong cryptography + impact: 100 + props: + - uid: allowedCiphers + title: Define the hardened SSL/ TLS ciphers + mql: | + return ["TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384"] + mql: | + k8s.kubelet.configuration['tlsCipherSuites'] != null + if (k8s.kubelet.configuration['tlsCipherSuites'] != null) { + k8s.kubelet.configuration['tlsCipherSuites'].map( _.trim ).containsOnly(props.allowedCiphers) + } + docs: + desc: | + Ensure the kubelet runs with only strong cryptography support. Weak or old ciphers might expose your data. + audit: | + If running the kubelet with the CLI parameter '--tls-cipher-suites', or running with 'tlsCipherSuites' defined in the kubelet configuration file, ensure that the list of allowed ciphers is not empty and that all included ciphers are included in the following list: + + "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Define the list of allowed TLS ciphers to include only items from the strong list of ciphers. + + If the kubelet is configured with the CLI parameter '--tls-cipher-suites', update the list (or define the parameter) to only include strong ciphers. + + If the kubelet is configured via the kubelet config file with the 'tlsCipherSuites' parameter, update the list (or create an entry for 'tlsCipherSuites') to only include string ciphers. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-tls-certificate + title: Run kubelet with a user-provided certificate/key + impact: 100 + mql: | + k8s.kubelet.configuration["tlsCertFile"] != null + k8s.kubelet.configuration["tlsPrivateKeyFile"] != null + docs: + desc: | + Ensure that the kubelet is not running with self-signed certificates generated by the kubelet itself. 
+ audit: | + The kubelet CLI parameters override values in the kubelet configuration file. + + Check the kubelet CLI parameters to see whether '--tls-cert-file' and '--tls-private-key' are set to a non-empty path/string. + + Check the kubelet configuration file to see whether 'tlsCertFile' and 'tlsPrivateKeyFile' are set to a non-empty path/string. + remediation: | + Configure the kubelet to use a user-provided certificate/key pair for serving up HTTPS. + + After acquiring the TLS certificate/key pair, update the kubelet configuration file + + Or if using the deprecated kubelet CLI parameters, update the '--tls-cert-file' and '--tls-private-key-file' parameters to use the new certificate/key. + - uid: mondoo-kubernetes-security-kubelet-rotate-certificates + title: Run kubelet with automatic certificate rotation + impact: 80 + mql: | + k8s.kubelet.configuration["rotateCertificates"] != "false" + docs: + desc: | + Ensure the kubelet is running with automatic certificate rotation so that the kubelet will automatically renew certificates with the API server as certificates near expiration. + Otherwise the communication between the kubelet and the API server will be interrupted. + audit: | + Check the kubelet CLI parameters to ensure '--rotate-certificates' is not set to false, and that the kubelet config file has not set 'rotateCertificates' to false. + remediation: | + Depending on where the configuration behavior is defined (CLI parameters override config file values), update the kubelet CLI parameters to set '--rotate-certificates' to true, and/or update the kubelet configuration to set 'rotateCertificates' to true. + refs: + - url: https://kubernetes.io/docs/tasks/tls/certificate-rotation/ + title: Configure Certificate Rotation for the Kubelet + - uid: mondoo-kubernetes-security-secure-kubelet-config + title: Ownership and permissions of kubelet configuration should be restricted + impact: 80 + mql: | + if (k8s.kubelet.configFile != null) { + if (k8s.kubelet.configFile.exists) { + k8s.kubelet.configFile { + user.name == "root" + group.name == "root" + } + k8s.kubelet.configFile.permissions { + user_readable == true + user_executable == false + group_readable == false + group_writeable == false + group_executable == false + other_readable == false + other_writeable == false + other_executable == false + } + } + } + docs: + desc: | + Ensure proper file ownership and read-write-execute permissions for kubelet configuration file. + Otherwise unprivileged users might get access to sensitive information. 
+ audit: | + View the kubelet configuration file details: + + ``` + $ ls -l /etc/kubernetes/kubelet.conf + -rw-r--r-- 1 root root 1155 Sep 21 15:03 /etc/kubernetes/kubelet.conf + ``` + remediation: | + Update the ownership and permissions: + + ``` + chown root:root /etc/kubernetes/kubelet.conf + chmod 600 /etc/kubernetes/kubelet.conf + ``` + - uid: mondoo-kubernetes-security-secure-kubelet-cert-authorities + title: Specify a kubelet certificate authorities file and ensure proper ownership and permissions + impact: 100 + mql: | + k8s.kubelet.configuration['authentication']['x509']['clientCAFile'] != null + if (k8s.kubelet.configuration['authentication']['x509']['clientCAFile'] != null) { + cafile = k8s.kubelet.configuration["authentication"]["x509"]["clientCAFile"] + file(cafile) { + user.name == "root" + group.name == "root" + } + file(cafile).permissions { + user_readable == true + user_executable == false + group_readable == false + group_writeable == false + group_executable == false + other_readable == false + other_writeable == false + other_executable == false + } + } + docs: + desc: | + Ensure appropriate ownership and permissions for the kubelet's certificate authorities configuration file. + audit: | + View the ownership and permissions: + + ``` + $ ls -l /etc/srv/kubernetes/pki/ca-certificates.crt + -rw------- 1 root root 1159 Sep 13 04:14 /etc/srv/kubernetes/pki/ca-certificates.crt + ``` + remediation: | + Update the ownership and permissions: + + ``` + chown root:root /etc/srv/kubernetes/pki/ca-certificates.crt + chmod 600 /etc/srv/kubernetes/pki/ca-certificates.crt + ``` + - uid: mondoo-kubernetes-security-secure-kube-apiserver-yml + title: Set secure file permissions on the API server pod specification file + impact: 60 + mql: | + if (file("/etc/kubernetes/manifests/kube-apiserver.yaml").exists) { + file("/etc/kubernetes/manifests/kube-apiserver.yaml") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the API server pod specification file has permissions of `600` and is owned by `root:root`. + Otherwise unprivileged users might change it. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml + chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml + ``` + - uid: mondoo-kubernetes-security-secure-etcd-data-dir + title: | + Set secure directory permissions on the etcd data directory. + Otherwise unprivileged users might get access to sensitive data stored in etcd, i.e., Kubernetes Secrets. 
+ impact: 60 + mql: | + if (file("/var/lib/etcd").exists) { + file("/var/lib/etcd") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == true + permissions.group_executable == false + permissions.other_executable == false + user.name == "etcd" + group.name == "etcd" + } + } else { + dir = processes.where( executable == /etcd/ ).list[0].flags["data-dir"] + file(dir) { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == true + permissions.group_executable == false + permissions.other_executable == false + user.name == "etcd" + group.name == "etcd" + } + } + docs: + desc: Ensure that the etcd data directory has permissions of `700` and is owned by `etcd:etcd`. + remediation: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + + ``` + ps -ef | grep etcd + ``` + + Run the below command: + + ``` + chmod 700 /var/lib/etcd + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/secret/ + title: Kubernetes Secrets + - uid: mondoo-kubernetes-security-secure-admin-conf + title: Set secure file permissions on the admin.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/admin.conf").exists) { + file("/etc/kubernetes/admin.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the `admin.conf` file has permissions of `600` and is owned by root:root. + Otherwise unprivileged users might get admin access to the Kubernetes API server. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/admin.conf + chown root:root /etc/kubernetes/admin.conf + ``` + refs: + - url: https://kubernetes.io/docs/setup/ + title: Kubernetes Setup + - uid: mondoo-kubernetes-security-secure-scheduler_conf + title: Set secure file permissions on the scheduler.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/scheduler.conf").exists) { + file("/etc/kubernetes/scheduler.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: Ensure that the `scheduler.conf` file has permissions of `600` and is owned by `root:root`. 
+ remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/scheduler.conf + chown root:root /etc/kubernetes/scheduler.conf + ``` + - uid: mondoo-kubernetes-security-secure-controller-manager_conf + title: Set secure file permissions on the controller-manager.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/controller-manager.conf").exists) { + file("/etc/kubernetes/controller-manager.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: Ensure that the `controller-manager.conf` file has permissions of `600` and is owned by `root:root`. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/controller-manager.conf + chown root:root /etc/kubernetes/controller-manager.conf + ``` + - uid: mondoo-kubernetes-security-secure-pki-directory + title: Ensure that the Kubernetes PKI/SSL directory is owned by root:root + impact: 65 + mql: | + if (processes.where(executable == /kube-apiserver/).list[0].flags["etcd-certfile"] != null) { + clientCAFile = processes.where(executable == /kube-apiserver/).list[0].flags["etcd-certfile"] + ssldir = file(clientCAFile).dirname + file(ssldir) { + user.name == "root" + group.name == "root" + } + } else { + file("/etc/kubernetes/pki") { + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the Kubernetes PKI/SSL directory is owned by `root:root`. + Otherwise unprivileged users could change the PKI/SSL certificates the whole encryption of the cluster relies on. + remediation: |- + Run one of these commands on the Control Plane node depending on the location of your PKI/SSL directory: + + ``` + chown -R root:root /etc/kubernetes/pki/ + ``` + + or + + ``` + chown -R root:root /etc/kubernetes/ssl/ + ```` + refs: + - url: https://kubernetes.io/docs/setup/best-practices/certificates/ + title: PKI certificates and requirements + - uid: mondoo-kubernetes-security-https-api-server + title: Ensure the kube-apiserver is not listening on an insecure HTTP port + impact: 70 + mql: | + processes.where(executable == /kube-apiserver/).list { + flags["insecure-port"] == 0 + } + docs: + desc: | + Ensure the kube-apiserver is not listening on an insecure HTTP port. + Otherwise unencrypted traffic could be intercepted and sensitive data could be leaked. + remediation: |- + Find the kube-apiserver process and check the `insecure-port` argument. If the argument is set to `0`, then the kube-apiserver is not listening on an insecure HTTP port: + ``` + ps aux | grep kube-apiserver + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/controlling-access/#transport-security + title: Controlling Access to the Kubernetes API - Transport security + - uid: mondoo-kubernetes-security-api-server-no-anonymous-auth + title: | + Ensure the kube-apiserver does not allow anonymous authentication. + When allowed, request will have the privileges of the role `system:public-info-viewer`. This might expose data to an attacker. 
+ impact: 100 + mql: | + processes.where(executable == /kube-apiserver/).list { + flags["anonymous-auth"] == "false" + } + docs: + desc: Ensure the kube-apiserver does not allow anonymous authentication. + remediation: |- + Find the kube-apiserver process and check the `--anonymous-auth` argument. If the argument is set to `false`, then the kube-apiserver does not allow anonymous authentication: + ``` + ps aux | grep kube-apiserver + ``` + refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests + title: Anonymous requests + - uid: mondoo-kubernetes-security-pod-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: | + k8s.pod { + podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + } + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: | + k8s.cronjob { + podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + } + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
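The remediation snippets for these runtime-socket checks show the offending `hostPath` volume removed from `volumes`; note that the matching `volumeMounts` entry in the container spec has to go as well, otherwise the pod references a volume that no longer exists and is rejected. A minimal sketch of a fully remediated pod, using the same hypothetical names as the examples in this bundle:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: images.my-company.example/app:v1.2.3
      # no volumeMounts referencing the runtime socket
  # no hostPath volume pointing at docker.sock, containerd.sock, or crio.sock
```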
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.pod.ephemeralContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.cronjob.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.cronjob.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.statefulset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.statefulset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.deployment.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.job.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.job.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.replicaset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.replicaset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.daemonset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.daemonset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
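As the deployment variant's description points out, privilege escalation is always effectively allowed for privileged containers and containers granted `CAP_SYS_ADMIN`, so a conforming securityContext avoids all three together. A minimal sketch with hypothetical names:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: images.my-company.example/app:v1.2.3
      securityContext:
        allowPrivilegeEscalation: false
        privileged: false
        capabilities:
          drop: ["ALL"]   # avoids CAP_SYS_ADMIN, which would imply escalation
```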
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.pod.ephemeralContainers.all( securityContext['privileged'] != true ) + k8s.pod.initContainers.all( securityContext['privileged'] != true ) + k8s.pod.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.cronjob.initContainers.all( securityContext['privileged'] != true ) + k8s.cronjob.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
+ audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.statefulset.initContainers.all( securityContext['privileged'] != true ) + k8s.statefulset.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['privileged'] != true ) + k8s.deployment.initContainers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means that the container has the host's capabilities including access to all devices and the host's network. 
+ audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.job.initContainers.all( securityContext['privileged'] != true ) + k8s.job.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.replicaset.initContainers.all( securityContext['privileged'] != true ) + k8s.replicaset.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
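Workloads that set `privileged: true` often only need one specific capability. A hedged alternative sketch (the capability and names below are examples only; grant whatever the workload actually requires) that passes these checks by adding that capability explicitly instead of running privileged:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: net-tool
spec:
  containers:
    - name: net-tool
      image: images.my-company.example/net-tool:v1.0.0
      securityContext:
        privileged: false
        capabilities:
          drop: ["ALL"]
          add: ["NET_ADMIN"]   # example capability instead of full privileged mode
```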
+ audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.daemonset.initContainers.all( securityContext['privileged'] != true ) + k8s.daemonset.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.pod.ephemeralContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.pod.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.pod.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers.
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.cronjob.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.cronjob.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.statefulset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.statefulset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.deployment.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.deployment.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.job.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.job.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.replicaset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.replicaset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.daemonset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.daemonset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
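Many applications still need a writable scratch location (for example `/tmp`) once the root filesystem is read-only. A hedged sketch of how that is commonly combined with an `emptyDir` volume, using hypothetical names and mount path:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: images.my-company.example/app:v1.2.3
      securityContext:
        readOnlyRootFilesystem: true
      volumeMounts:
        - name: tmp
          mountPath: /tmp      # writable scratch space despite the read-only root
  volumes:
    - name: tmp
      emptyDir: {}
```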
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.pod.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-pod-runasnonroot'] != 'ignore') { + k8s.pod { + podSecurityContext=podSpec['securityContext'] + ephemeralContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.cronjob.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-cronjob-runasnonroot'] != 'ignore') { + k8s.cronjob { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.statefulset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['runAsNonRoot'] == true ) + k8s.deployment.initContainers.all( securityContext['runAsNonRoot'] == true ) + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. + audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.job.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-job-runasnonroot'] != 'ignore') { + k8s.job { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && 
securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. + audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.replicaset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.daemonset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.pod.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network including loopback devices. This capability can be used to intercept network traffic including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.cronjob.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. 
+ audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.statefulset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.deployment.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.job.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. 
+ audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: | + k8s.replicaset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: | + k8s.daemonset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-pod-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.pod.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.cronjob.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.statefulset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.deployment.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.job.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.replicaset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: | + k8s.daemonset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-pod-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.pod.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.cronjob.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.statefulset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: k8s.deployment.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. 
+ audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.job.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.replicaset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.daemonset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. 
+ audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-pod-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.pod.podSpec['serviceAccount'] == null || k8s.pod.podSpec['serviceAccount'] == k8s.pod.podSpec['serviceAccountName'] + k8s.pod.podSpec['serviceAccountName'] != '' || k8s.pod.podSpec['automountServiceAccountToken'] == false + k8s.pod.podSpec['serviceAccountName'] != 'default' || k8s.pod.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. + In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. 
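The remediation for the service-account checks above is prose-only. As a minimal, illustrative sketch of the first remediation path (the ServiceAccount name `app-backend` and the Pod metadata are hypothetical, not part of the bundle), a dedicated ServiceAccount referenced from the Pod spec satisfies the query:

```yaml
---
# Hypothetical dedicated ServiceAccount; grant it only the RBAC rules the workload needs.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-backend
---
apiVersion: v1
kind: Pod
metadata:
  name: example-app
spec:
  # Reference the dedicated ServiceAccount instead of relying on 'default'.
  serviceAccountName: app-backend
  containers:
    - name: example-app
      image: index.docker.io/yournamespace/repository
```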
+ - uid: mondoo-kubernetes-security-cronjob-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.cronjob.podSpec['serviceAccount'] == null || k8s.cronjob.podSpec['serviceAccount'] == k8s.cronjob.podSpec['serviceAccountName'] + k8s.cronjob.podSpec['serviceAccountName'] != '' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false + k8s.cronjob.podSpec['serviceAccountName'] != 'default' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. + In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. + - uid: mondoo-kubernetes-security-statefulset-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.statefulset.podSpec['serviceAccount'] == null || k8s.statefulset.podSpec['serviceAccount'] == k8s.statefulset.podSpec['serviceAccountName'] + k8s.statefulset.podSpec['serviceAccountName'] != '' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false + k8s.statefulset.podSpec['serviceAccountName'] != 'default' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. 
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. + - uid: mondoo-kubernetes-security-deployment-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.deployment.podSpec['serviceAccount'] == null || k8s.deployment.podSpec['serviceAccount'] == k8s.deployment.podSpec['serviceAccountName'] + k8s.deployment.podSpec['serviceAccountName'] != '' || k8s.deployment.podSpec['automountServiceAccountToken'] == false + k8s.deployment.podSpec['serviceAccountName'] != 'default' || k8s.deployment.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. + In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. 
+ + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. + - uid: mondoo-kubernetes-security-job-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.job.podSpec['serviceAccount'] == null || k8s.job.podSpec['serviceAccount'] == k8s.job.podSpec['serviceAccountName'] + k8s.job.podSpec['serviceAccountName'] != '' || k8s.job.podSpec['automountServiceAccountToken'] == false + k8s.job.podSpec['serviceAccountName'] != 'default' || k8s.job.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. + In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. 
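For workloads that never interact with the Kubernetes API, the second remediation path (opting out of token mounting) could look like this sketch; the Pod and container names are illustrative:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: example-app
spec:
  # No ServiceAccount token is mounted, so the implicit 'default'
  # ServiceAccount cannot be used from inside the containers.
  automountServiceAccountToken: false
  containers:
    - name: example-app
      image: index.docker.io/yournamespace/repository
```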
+ - uid: mondoo-kubernetes-security-replicaset-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.replicaset.podSpec['serviceAccount'] == null || k8s.replicaset.podSpec['serviceAccount'] == k8s.replicaset.podSpec['serviceAccountName'] + k8s.replicaset.podSpec['serviceAccountName'] != '' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false + k8s.replicaset.podSpec['serviceAccountName'] != 'default' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. + In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. + - uid: mondoo-kubernetes-security-daemonset-serviceaccount + title: Pod should not run with the default service account + impact: 30 + mql: | + k8s.daemonset.podSpec['serviceAccount'] == null || k8s.daemonset.podSpec['serviceAccount'] == k8s.daemonset.podSpec['serviceAccountName'] + k8s.daemonset.podSpec['serviceAccountName'] != '' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false + k8s.daemonset.podSpec['serviceAccountName'] != 'default' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false + docs: + desc: | + Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. + These ServiceAccounts should only have the permissions necessary. + The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace. + The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'. 
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod. + When every Pods uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions. + When a Pod is compromised, the attacker has access to the API using the default ServiceAccount. + audit: | + Check that Pods do not set the legacy '.spec.serviceAccount': + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccount: some-account + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'. + + ```yaml + apiVersion: v1 + kind: Pod + spec: + serviceAccountName: "" + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + + Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied. + Because of that, we also need to check for the field. + remediation: | + Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod. + + Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod. + - uid: mondoo-kubernetes-security-pod-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.pod.ephemeralContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.pod.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.pod.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. 
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-cronjob-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.cronjob.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.cronjob.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. + audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-statefulset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.statefulset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.statefulset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. 
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-deployment-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.deployment.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.deployment.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. + audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-job-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.job.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.job.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time.
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-replicaset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.replicaset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.replicaset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. + audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-daemonset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.daemonset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.daemonset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. 
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-pod-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.pod.initContainers.all( resources['limits']['cpu'] != null ) + k8s.pod.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes Pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-cronjob-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.cronjob.initContainers.all( resources['limits']['cpu'] != null ) + k8s.cronjob.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-statefulset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.statefulset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.statefulset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-deployment-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.deployment.initContainers.all( resources['limits']['cpu'] != null ) + k8s.deployment.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-job-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.job.initContainers.all( resources['limits']['cpu'] != null ) + k8s.job.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-replicaset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.replicaset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.replicaset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-daemonset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.daemonset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.daemonset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-pod-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.pod.initContainers.all( resources['limits']['memory'] != null ) + k8s.pod.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-cronjob-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.cronjob.initContainers.all( resources['limits']['memory'] != null ) + k8s.cronjob.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
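The example manifests in this bundle show a bare Pod; for a CronJob the same `resources.limits` block sits on the containers inside the job template's pod spec. A minimal sketch with placeholder names and values:

```yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: example
spec:
  schedule: "*/5 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: app
              image: images.my-company.example/app:v1.2.3
              resources:
                limits:
                  memory: "1Gi"   # memory limit inspected by this query
          restartPolicy: OnFailure
```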
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-statefulset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.statefulset.initContainers.all( resources['limits']['memory'] != null ) + k8s.statefulset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-deployment-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.deployment.initContainers.all( resources['limits']['memory'] != null ) + k8s.deployment.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-job-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.job.initContainers.all( resources['limits']['memory'] != null ) + k8s.job.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-replicaset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.replicaset.initContainers.all( resources['limits']['memory'] != null ) + k8s.replicaset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-daemonset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.daemonset.initContainers.all( resources['limits']['memory'] != null ) + k8s.daemonset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
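Likewise, for a DaemonSet the memory limit belongs on the containers in the pod template rather than on a bare Pod as shown in the audit snippets. A minimal sketch with placeholder names and values:

```yaml
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example
spec:
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
        - name: app
          image: images.my-company.example/app:v1.2.3
          resources:
            limits:
              memory: "512Mi"   # memory limit inspected by this query
```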
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-pod-capability-net-raw + title: Pods should not run with NET_RAW capability + impact: 80 + mql: | + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + Pods should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Pods have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get pods -A -o json | jq -r '.items[] | select(.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Pod that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these Pods with: + + ```kubectl get pods -A -o json | jq -r '.items[] | select( .spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that explicitly add the NET_RAW or ALL capability, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: examplePod + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Pods that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-daemonset-capability-net-raw + title: DaemonSets should not run with NET_RAW capability + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + DaemonSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no DaemonSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a DaemonSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any DaemonSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-replicaset-capability-net-raw + title: ReplicaSets should not run with NET_RAW capability + impact: 80 + mql: | + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + ReplicaSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no ReplicaSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a ReplicaSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any ReplicaSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-job-capability-net-raw + title: Jobs should not run with NET_RAW capability + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + Jobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Jobs have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get jobs -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Job that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these Jobs with: + + ```kubectl get jobs -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | .
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Jobs that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-deployment-capability-net-raw + title: Deployments should not run with NET_RAW capability + impact: 80 + mql: | + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'].none( _['add'].contains("NET_RAW") )) + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'].none( _['add'].contains("ALL") )) + docs: + desc: | + Deployments should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Deployments have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get deployments -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Deployment that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get deployments -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Deployments that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-statefulset-capability-net-raw + title: StatefulSets should not run with NET_RAW capability + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + StatefulSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no StatefulSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a StatefulSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any StatefulSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-cronjob-capability-net-raw + title: CronJobs should not run with NET_RAW capability + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + CronJobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no CronJobs have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select(.spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a CronJob that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select( .spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any CronJobs that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-pod-capability-sys-admin + title: Pods should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Pods should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no Pods have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get pods -A -o json | jq -r '.items[] | select(.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that explicitly add the SYS_ADMIN or ALL capability, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: examplePod + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-daemonset-capability-sys-admin + title: DaemonSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + DaemonSets should not run with SYS_ADMIN capability. 
The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no DaemonSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-replicaset-capability-sys-admin + title: ReplicaSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + ReplicaSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no ReplicaSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-job-capability-sys-admin + title: Jobs should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Jobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. 
+ audit: | + Check to ensure no Jobs have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get jobs -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-deployment-capability-sys-admin + title: Deployments should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Deployments should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no Deployments have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get deployments -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-statefulset-capability-sys-admin + title: StatefulSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + StatefulSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root.
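These SYS_ADMIN queries only flag capabilities that are explicitly added. A common hardening pattern that also keeps them passing is to add nothing and drop everything; an abbreviated sketch for a StatefulSet pod template, with placeholder names:

```yaml
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: example
  namespace: example-namespace
spec:
  template:
    spec:
      containers:
        - name: app
          image: images.my-company.example/app:v1.2.3
          securityContext:
            capabilities:
              add: []          # nothing added, so the SYS_ADMIN/ALL checks pass
              drop: ["ALL"]    # drop all capabilities unless explicitly needed
```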
+ audit: | + Check to ensure no StatefulSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-cronjob-capability-sys-admin + title: CronJobs should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + CronJobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no CronJobs have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select(.spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-pod-ports-hostport + title: Pods should not bind to a host port + impact: 80 + mql: | + k8s.pod.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + k8s.pod.podSpec['initContainers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Pods should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. 
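When the workload genuinely needs to be reachable, the usual alternative to `hostPort` is to front the pod with a Service; a minimal sketch with placeholder names, labels, and ports:

```yaml
---
apiVersion: v1
kind: Service
metadata:
  name: example
  namespace: example-namespace
spec:
  selector:
    app: example          # matches the pod labels
  ports:
    - name: http
      port: 80            # port exposed by the Service
      targetPort: 80      # containerPort on the pod; no hostPort required
```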
+ audit: | + Check to ensure no Pods are binding any of their containers to a host port: + + ```kubectl get pods -A -o json | jq -r '.items[] | select( (.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that bind to a host port, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not bind to a host port: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-daemonset-ports-hostport + title: DaemonSets should not bind to a host port + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + DaemonSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no DaemonSets are binding any of their containers to a host port: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSets that bind to a host port, update the DaemonSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-replicaset-ports-hostport + title: ReplicaSets should not bind to a host port + impact: 80 + mql: | + k8s.replicaset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + ReplicaSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no ReplicaSets are binding any of their containers to a host port: + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . 
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that bind to a host port, update the ReplicaSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-job-ports-hostport + title: Jobs should not bind to a host port + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Jobs should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no Jobs are binding any of their containers to a host port: + + ```kubectl get jobs -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that bind to a host port, update the Jobs to ensure they do not bind to a host port: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-deployment-ports-hostport + title: Deployments should not bind to a host port + impact: 80 + mql: | + k8s.deployment.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Deployments should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no Deployments are binding any of their containers to a host port: + + ```kubectl get deployments -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; .
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that bind to a host port, update the Deployments to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-statefulset-ports-hostport + title: StatefulSets should not bind to a host port + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + StatefulSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no StatefulSets are binding any of their containers to a host port: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that bind to a host port, update the StatefulSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-cronjob-ports-hostport + title: CronJobs should not bind to a host port + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + CronJobs should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no CronJobs are binding any of their containers to a host port: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select( (.spec.jobTemplate.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . 
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that bind to a host port, update the CronJobs to ensure they do not bind to a host port: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-pod-hostpath-readonly + title: Pods should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['initContainers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['ephemeralContainers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Pods should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Pod are mounting hostPath volumes as read-write: + + ```kubectl get pods -A -o json | jq -r '.items[] | [.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pod containers that mount a hostPath volume as read-write, update them (or the Deployment/StatefulSet/etc that created the Pod): + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-daemonset-hostpath-readonly + title: DaemonSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.daemonset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + DaemonSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a DaemonSet are mounting hostPath volumes as read-write: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-replicaset-hostpath-readonly + title: ReplicaSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.replicaset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + ReplicaSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a ReplicaSet are mounting hostPath volumes as read-write: + + ```kubectl get replicasets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-job-hostpath-readonly + title: Jobs should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.job.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Jobs should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Job are mounting hostPath volumes as read-write: + + ```kubectl get jobs -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Job containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-deployment-hostpath-readonly + title: Deployments should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.deployment.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Deployments should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Deployment are mounting hostPath volumes as read-write: + + ```kubectl get deployments -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployment containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-statefulset-hostpath-readonly + title: StatefulSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.statefulset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + StatefulSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a StatefulSet are mounting hostPath volumes as read-write: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-cronjob-hostpath-readonly + title: CronJobs should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.cronjob.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + CronJobs should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a CronJob are mounting hostPath volumes as read-write: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | [.spec.jobTemplate.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.jobTemplate.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJob containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-deployment-tiller + title: Deployments should not run Tiller (Helm v2) + impact: 40 + mql: | + k8s.deployment.podSpec["containers"].none( _["image"].contains("tiller") ) + docs: + desc: | + Tiller is the in-cluster component for the Helm v2 package manager. It communicates directly with the Kubernetes API and therefore has broad RBAC permissions. An attacker can use this to gain cluster-wide access. + audit: | + Verify there are no deployments running Tiller: + ```kubectl get deployments -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image"``` + remediation: | + Delete any deployments that are running Tiller. + - uid: mondoo-kubernetes-security-pod-tiller + title: Pods should not run Tiller (Helm v2) + impact: 40 + mql: | + k8s.pod.podSpec["containers"].none( _["image"].contains("tiller") ) + k8s.pod.podSpec["initContainers"].none( _["image"].contains("tiller") ) + k8s.pod.podSpec["ephemeralContainers"].none( _["image"].contains("tiller") ) + docs: + desc: | + Tiller is the in-cluster component for the Helm v2 package manager. It communicates directly with the Kubernetes API and therefore has broad RBAC permissions. An attacker can use this to gain cluster-wide access. + audit: | + Verify there are no pods running Tiller: + ```kubectl get pods -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"``` + remediation: | + Delete any pods that are running Tiller. + - uid: mondoo-kubernetes-security-deployment-k8s-dashboard + title: Deployments should not run Kubernetes dashboard + impact: 40 + mql: | + k8s.deployment.podSpec["containers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.deployment.labels["app"] == null || k8s.deployment.labels["app"] != "kubernetes-dashboard" + k8s.deployment.labels["k8s-app"] == null || k8s.deployment.labels["k8s-app"] != "kubernetes-dashboard" + docs: + desc: | + The Kubernetes dashboard allows browsing through cluster resources such as workloads, configmaps and secrets. In 2018 Tesla was hacked because their Kubernetes dashboard was publicly exposed. This allowed the attackers to extract credentials and deploy cryptocurrency miners on the cluster. + audit: | + Verify there are no deployments running Kubernetes dashboard: + ```kubectl get deployments -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image"``` + remediation: | + Delete any deployments that are running Kubernetes dashboard.
+ - uid: mondoo-kubernetes-security-pod-k8s-dashboard + title: Pods should not run Kubernetes dashboard + impact: 40 + mql: | + k8s.pod.podSpec["containers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.podSpec["initContainers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.podSpec["ephemeralContainers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.labels["app"] == null || k8s.pod.labels["app"] != "kubernetes-dashboard" + k8s.pod.labels["k8s-app"] == null || k8s.pod.labels["k8s-app"] != "kubernetes-dashboard" + docs: + desc: | + The Kubernetes dashboard allows browsing through cluster resources such as workloads, configmaps and secrets. In 2018 Tesla was hacked because their Kubernetes dashboard was publicly exposed. This allowed the attackers to extract credentials and deploy cryptocurrency miners on the cluster. + audit: | + Verify there are no pods running Kubernetes dashboard: + ```kubectl get pods -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"``` + remediation: | + Delete any pods that are running Kubernetes dashboard. diff --git a/test/testdata/mondoo-terraform-aws-security.mql.yaml b/test/testdata/mondoo-terraform-aws-security.mql.yaml new file mode 100644 index 00000000..860b399e --- /dev/null +++ b/test/testdata/mondoo-terraform-aws-security.mql.yaml @@ -0,0 +1,563 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-terraform-aws-security + name: Terraform HCL Security Static Analysis for AWS + version: 1.2.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: aws,cloud,terraform + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: | + ## Overview + + This policy checks for security misconfigurations in Terraform for Amazon Web Services. + + ## Local scan + + Local scans refer to scans of files and operating systems where cnspec is installed. + + ### Scan a Terraform project + + Open a terminal and run this command: + + ```bash + cnspec scan terraform /path/to/terraform/directory + ``` + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions.
+ groups: + - title: AWS General + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-no-static-credentials-in-providers + - title: Amazon API Gateway + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-api-gw-cache-enabled-and-encrypted + - uid: terraform-aws-security-api-gw-execution-logging-enabled + - uid: terraform-aws-security-api-gw-require-authentication + - uid: terraform-aws-security-api-gw-tls + - uid: terraform-aws-security-api-gw-xray-enabled + - title: Amazon Elastic Compute Cloud (Amazon EC2) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-ec2-ebs-encryption-by-default + - uid: terraform-aws-security-ec2-imdsv2 + - uid: terraform-aws-security-ec2-user-data-no-secrets + - title: AWS Identity and Access Management (IAM) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-iam-no-wildcards-policies + - title: Amazon Simple Storage Service (Amazon S3) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-s3-bucket-level-public-access-prohibited + - uid: terraform-aws-security-s3-bucket-logging-enabled + - uid: terraform-aws-security-s3-bucket-public-read-and-write-prohibited + - uid: terraform-aws-security-s3-bucket-server-side-encryption-enabled + - uid: terraform-aws-security-s3-bucket-versioning-enabled + - title: AWS Elastic Kubernetes Service (EKS) Security for Terraform + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-eks-encrypt-secrets + - uid: terraform-aws-security-eks-no-public-cluster-access-to-cidr +queries: + - uid: terraform-aws-security-no-static-credentials-in-providers + title: Providers should not contain hard-coded credentials + mql: | + terraform.providers.where( nameLabel == "aws" ) { + arguments["access_key"] == null || arguments["access_key"].find(/(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}/).all("AKIAIOSFODNN7EXAMPLE") + arguments["secret_key"] == null || arguments["secret_key"].find(/([A-Za-z0-9\\\/+\\]{40})/).all( "wJalrXUtnFEMI/A1AAAAA/bPxRfiCYAAAAAAAKEY") + } + docs: + desc: | + Hard-coded credentials are not recommended in any Terraform configuration, and risks secret leakage should this file ever be committed to a public version control system. + audit: | + Check for the existence of hard-coded credentials in the AWS provider + + ```hcl + provider "aws" { + region = "us-west-2" + access_key = "my-access-key" + secret_key = "my-secret-key" + } + ``` + remediation: | + The following are more secure alternatives for configuring the AWS provider: + + __Environment Variables__ + You can provide your credentials via the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables representing your AWS Access Key and AWS Secret Key, respectively. Note that setting your AWS credentials using either these (or legacy) environment variables will override the use of `AWS_SHARED_CREDENTIALS_FILE` and `AWS_PROFILE`. 
The `AWS_DEFAULT_REGION` and `AWS_SESSION_TOKEN` environment variables are also used, if applicable: + + ```bash + $ export AWS_ACCESS_KEY_ID="an_accesskey" + $ export AWS_SECRET_ACCESS_KEY="a_secretkey" + $ export AWS_DEFAULT_REGION="us-west-2" + $ terraform plan + ``` + + ```hcl + provider "aws" {} + ``` + + __Assumed Role__ + If provided with a role ARN, Terraform will attempt to assume this role using the supplied credentials. + + ```hcl + provider "aws" { + assume_role { + role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME" + session_name = "SESSION_NAME" + external_id = "EXTERNAL_ID" + } + } + ``` + - uid: terraform-aws-security-api-gw-cache-enabled-and-encrypted + title: API Gateway must have cache enabled + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_method_settings") { + blocks.one(type == "settings" && arguments["cache_data_encrypted"] == true) + } + docs: + desc: Ensure that all methods in Amazon API Gateway stages have cache enabled and cache encrypted + audit: | + Check if `cache_data_encrypted` is set to `false` + + ```hcl + resource "aws_api_gateway_method_settings" "fail_example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" + + settings { + metrics_enabled = true + logging_level = "INFO" + cache_data_encrypted = false + } + } + ``` + remediation: | + Enable cache encryption by setting `cache_data_encrypted` to `true` + + ```hcl + resource "aws_api_gateway_method_settings" "good_example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" + + settings { + metrics_enabled = true + logging_level = "INFO" + cache_data_encrypted = true + } + } + ``` + - uid: terraform-aws-security-api-gw-execution-logging-enabled + title: Ensure that all methods in Amazon API Gateway stage have logging enabled + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_stage") { + blocks.one(type == "access_log_settings" && arguments["destination_arn"] != "" ) + } + + terraform.resources.where( nameLabel == "aws_apigatewayv2_stage") { + blocks.one(type == "access_log_settings" && arguments["destination_arn"] != "" ) + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-execution-logging-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method_settings#cache_data_encrypted + title: Terraform Documentation - api_gateway_method_settings Resource + - uid: terraform-aws-security-api-gw-xray-enabled + title: Ensure AWS X-Ray tracing is enabled on Amazon API Gateway REST APIs + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_stage") { + arguments["xray_tracing_enabled"] == true + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-xray-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_stage#xray_tracing_enabled + title: Terraform Documentation - api_gateway_stage Resource + - uid: terraform-aws-security-api-gw-require-authentication + title: Ensure Authentication for API Gateway methods is activated + mql: | + terraform.resources + .where( nameLabel == "aws_api_gateway_method" && arguments["authorization"].upcase == "NONE" && arguments["http_method"].upcase != "OPTION" ) + .all(arguments["api_key_required"] == true ) + refs: + - url: 
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method#authorization + title: Terraform Documentation - api_gateway_method Resource + - uid: terraform-aws-security-api-gw-tls + title: Ensure that the API Gateway uses a secure SSL/TLS configuration + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_domain_name") { + arguments["security_policy"] == "TLS-1-2" + } + refs: + - url: https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html + title: Choosing a minimum TLS version for a custom domain in API Gateway + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-ssl-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_domain_name#security_policy + title: Terraform Documentation - api_gateway_domain_name Resource + - uid: terraform-aws-security-ec2-ebs-encryption-by-default + title: Ensure that Amazon Elastic Block Store (EBS) encryption is enabled by default + mql: | + terraform.resources.where( nameLabel == "aws_ebs_volume").all(arguments["encrypted"] == true) || + terraform.resources.one( nameLabel == "aws_ebs_encryption_by_default" && arguments["enabled"] == true ) + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/ec2-ebs-encryption-by-default.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_volume#encrypted + title: Terraform Documentation - ebs_volume Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_encryption_by_default + title: Terraform Documentation - ebs_encryption_by_default Resource + - uid: terraform-aws-security-ec2-imdsv2 + title: Ensure Instance Metadata Service Version 2 (IMDSv2) with session authentication tokens is active + mql: | + terraform.resources.where( nameLabel == "aws_instance") { + blocks.one(type == "metadata_options") + blocks.where(type == "metadata_options") { + arguments["http_tokens"] == "required" || arguments["http_endpoint"] == "disabled" + } + } + refs: + - url: https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service + title: Add defense in depth against open firewalls, reverse proxies, and SSRF vulnerabilities with enhancements to the EC2 Instance Metadata Service + - url: https://docs.aws.amazon.com/config/latest/developerguide/ec2-imdsv2-check.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance#metadata-options + title: Terraform Documentation - Metadata Options + - uid: terraform-aws-security-ec2-user-data-no-secrets + title: Ensure EC2 instance user data does not contain secrets + mql: | + terraform.resources.where( nameLabel == "aws_instance" && arguments["user_data"] != null ) { + # ensure that all used AWS_ACCESS_KEY_ID are the sample key + arguments["user_data"] { + _.find(/(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}/).all("AKIAIOSFODNN7EXAMPLE") + } + + # ensure that all used secret keys are the sample key + arguments["user_data"] { + _.find(/([A-Za-z0-9\\\/+\\]{40})/).all( "wJalrXUtnFEMI/A1AAAAA/bPxRfiCYAAAAAAAKEY") + } + } + refs: + - url: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html + title: Work with instance user data + - url: 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html + title: Run commands on your Linux instance at launch + - url: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html + title: Run commands on your Windows instance at launch + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance#user_data + title: Terraform Documentation - aws_instance Resource + - uid: terraform-aws-security-iam-no-wildcards-policies + title: Ensure IAM policy do not use wildcards and instead apply the principle of least privilege + mql: | + # verify aws_iam_policy + terraform.resources.where( nameLabel == "aws_iam_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify aws_iam_user_policy + terraform.resources.where( nameLabel == "aws_iam_user_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify iam_role_policy + terraform.resources.where( nameLabel == "iam_role_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify iam_group_policy + terraform.resources.where( nameLabel == "iam_group_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + refs: + - url: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html + title: Security best practices in IAM + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy + title: Terraform Documentation - iam_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_user_policy + title: Terraform Documentation - iam_user_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy + title: Terraform Documentation - iam_role_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_group_policy + title: Terraform Documentation - iam_group_policy Resource + - uid: terraform-aws-security-s3-bucket-versioning-enabled + title: Ensure that versioning is enabled for your S3 buckets + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + blocks.one(type == 'versioning') + blocks.where(type == 'versioning') { + arguments['enabled'] == true + } + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_versioning' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_versioning' && arguments['bucket'].split('.')[1] == bucketnames ) { + blocks.one( 
type.downcase == 'versioning_configuration' ) + blocks.where( type.downcase == 'versioning_configuration' ) { + arguments['status'].downcase == 'enabled' + } + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-versioning-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - uid: terraform-aws-security-s3-bucket-logging-enabled + title: Ensure logging is enabled for your S3 buckets + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + blocks.one( type == 'logging') + blocks.where(type == 'logging') { + arguments['target_bucket'] != null + } + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_logging' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_logging' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['target_bucket'] != null + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-logging-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html + title: Logging requests using server access logging + - uid: terraform-aws-security-s3-bucket-level-public-access-prohibited + title: Ensure Amazon Simple Storage Service (Amazon S3) buckets are not publicly accessible + mql: | + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_public_access_block' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_public_access_block' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['block_public_acls'] == true + arguments['block_public_policy'] == true + arguments['ignore_public_acls'] == true + arguments['restrict_public_buckets'] == true + } + } + docs: + desc: | + [Blocking public access to your Amazon S3 storage](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) provides very specific documentation about the various settings for s3 bucket policies. 
+ + - `block_public_acls` - By blocking `public` ACLs, PUT requests will fail if the object has any public ACL defined + - `ignore_public_acls` - By ignoring the bucket ACL, PUT calls with public ACLs will still work, but the public ACL will be ignored + - `block_public_policy` - Prevents users from putting a policy that enables public access + - `restrict_public_buckets` - Restricts access to the bucket owner and AWS Services if the bucket has a public policy + remediation: "" + refs: + - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html + title: Blocking public access to your Amazon S3 storage + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-level-public-access-prohibited.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block#restrict_public_buckets + title: Terraform Documentation - s3_bucket Resource - restrict_public_buckets Argument + - uid: terraform-aws-security-s3-bucket-server-side-encryption-enabled + title: Ensure S3 buckets have Amazon S3 default encryption enabled + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + blocks.one( type == "server_side_encryption_configuration" ) + blocks.where( type == "server_side_encryption_configuration" ) { + blocks.one( _.type == "rule" && _.blocks.one( type == 'apply_server_side_encryption_by_default' )) + } + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket_server_side_encryption_configuration') { + blocks.one( type == "rule" ) + blocks.where( type == "rule" ) { + blocks.one( _.type == 'apply_server_side_encryption_by_default' ) + } + } + } + docs: + desc: | + Amazon S3 default encryption is an optional configuration that sets the default encryption behavior for an S3 bucket. Enabling default SSE configures S3 buckets so that all new objects are encrypted when they are stored in the bucket. The objects are encrypted using server-side encryption with either Amazon S3-managed keys (SSE-S3) or AWS KMS keys stored in AWS Key Management Service (AWS KMS) (SSE-KMS). + + Enabling SSE by default reduces the risk of unauthorized access to objects stored in the bucket.
+ remediation: "" + refs: + - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-encryption.html + title: Setting default server-side encryption behavior for Amazon S3 buckets + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-server-side-encryption-enabled.html + title: AWS Config Managed Rules + - uid: terraform-aws-security-s3-bucket-public-read-and-write-prohibited + title: Ensure Amazon S3 buckets do not allow public read access + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + arguments['acl'] != /public-read/ + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_acl' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_acl' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['acl'].downcase != /public-read/ + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-public-read-prohibited.html + title: AWS Config Managed Rules - public read + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-public-write-prohibited.html + title: AWS Config Managed Rules - public write + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket#acl + title: Terraform Documentation - s3_bucket Resource - acl Argument + - uid: terraform-aws-security-eks-encrypt-secrets + title: EKS should have the encryption of secrets enabled + mql: | + terraform.resources.where( nameLabel == "aws_eks_cluster" ) { + blocks.one( type == "encryption_config" ) + } + docs: + desc: | + EKS cluster resources should have the encryption_config block set with protection of the secrets resource. + + __Possible Impact__ + + EKS secrets could be read if compromised + + __Suggested Resolution__ + + Enable encryption of EKS secrets + audit: | + The following example will fail the `eks-encrypt-secrets` check: + + ```hcl + resource "aws_eks_cluster" "bad_example" { + name = "bad_example_cluster" + + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = false + } + } + ``` + remediation: | + The following example will pass the `eks-encrypt-secrets` check: + + ```hcl + resource "aws_eks_cluster" "good_example" { + encryption_config { + resources = [ "secrets" ] + provider { + key_arn = var.kms_arn + } + } + + name = "good_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = false + } + } + ``` + - uid: terraform-aws-security-eks-no-public-cluster-access-to-cidr + title: EKS Clusters should restrict access to public API server + mql: |- + terraform.resources.where( nameLabel == "aws_eks_cluster" ) { + blocks.where( type == "vpc_config" ) { + arguments['endpoint_public_access'] == false || arguments['public_access_cidrs'].none( "0.0.0.0/0") + } + } + docs: + desc: | + EKS Clusters have public access CIDRs set to 0.0.0.0/0 by default which is wide open to the internet. This should be explicitly set to a more specific private CIDR range. 
+ + __Possible Impact__ + + EKS can be accessed from the internet + + __Suggested Resolution__ + + Don't enable public access to EKS Clusters. + audit: | + The following example will fail the eks-no-public-cluster-access-to-cidr check: + + ```hcl + resource "aws_eks_cluster" "bad_example" { + + name = "bad_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = true + } + } + ``` + remediation: | + The following example will pass the eks-no-public-cluster-access-to-cidr check: + + ```hcl + resource "aws_eks_cluster" "good_example" { + name = "good_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = true + public_access_cidrs = ["10.2.0.0/8"] + } + } + ``` diff --git a/test/testdata/mondoo-terraform-gcp-security.mql.yaml b/test/testdata/mondoo-terraform-gcp-security.mql.yaml new file mode 100644 index 00000000..284df76d --- /dev/null +++ b/test/testdata/mondoo-terraform-gcp-security.mql.yaml @@ -0,0 +1,2297 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-terraform-gcp-security + name: Terraform HCL Security Static Analysis for Google Cloud + version: 1.2.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: gcp,cloud,terraform + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: | + ## Overview + + This policy checks for security misconfigurations in Terraform HCL for Google Cloud. + + ## Local scan + + Local scans refer to scans of files and operating systems where cnspec is installed. + + ### Scan a Terraform project + + Open a terminal and run this command: + + ```bash + cnspec scan terraform /path/to/terraform/directory + ``` + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions.
+ groups: + - title: GCP BigQuery + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-bigquery-no-public-access + - title: GCP Identity and Access Management (IAM) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-iam-no-folder-level-default-service-account-assignment + - uid: terraform-gcp-security-iam-no-folder-level-service-account-impersonation + - uid: terraform-gcp-security-iam-no-privileged-service-accounts + - title: GCP Cloud Storage + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-storage-enable-ubla + - uid: terraform-gcp-security-storage-no-public-access + - title: GCP Compute + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-compute-disk-encryption-customer-key + - uid: terraform-gcp-security-compute-disk-encryption-required + - uid: terraform-gcp-security-compute-enable-shielded-vm + - uid: terraform-gcp-security-compute-enable-vpc-flow-logs + - uid: terraform-gcp-security-compute-no-default-service-account + - uid: terraform-gcp-security-compute-no-ip-forwarding + - uid: terraform-gcp-security-compute-no-plaintext-vm-disk-keys + - uid: terraform-gcp-security-compute-no-public-ip + - title: GCP DNS + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-dns-enable-dnssec + - uid: terraform-gcp-security-dns-no-rsa-sha1 + - title: GCP Google Kubernetes Engine (GKE) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-gke-enable-auto-repair + - uid: terraform-gcp-security-gke-enable-auto-upgrade + - uid: terraform-gcp-security-gke-enable-ip-aliasing + - uid: terraform-gcp-security-gke-enable-master-networks + - uid: terraform-gcp-security-gke-enable-network-policy + - uid: terraform-gcp-security-gke-enable-private-cluster + - uid: terraform-gcp-security-gke-enable-stackdriver-logging + - uid: terraform-gcp-security-gke-enable-stackdriver-monitoring + - uid: terraform-gcp-security-gke-metadata-endpoints-disabled + - uid: terraform-gcp-security-gke-no-basic-authentication + - uid: terraform-gcp-security-gke-no-client-cert-authentication + - uid: terraform-gcp-security-gke-no-public-control-plane + - uid: terraform-gcp-security-gke-node-metadata-security + - uid: terraform-gcp-security-gke-node-pool-uses-cos + - uid: terraform-gcp-security-gke-node-shielding-enabled + - uid: terraform-gcp-security-gke-use-cluster-labels + - uid: terraform-gcp-security-gke-use-rbac-permissions + - uid: terraform-gcp-security-gke-use-service-account +queries: + - uid: terraform-gcp-security-iam-no-folder-level-default-service-account-assignment + title: Roles should not be assigned to default service accounts + mql: | + terraform.resources.where( nameLabel == "google_folder_iam_member") { + arguments['member'] != /.+@appspot\.gserviceaccount\.com/ && + arguments['member'] != /.+-compute@developer\.gserviceaccount\.com/ && + arguments['member'] != 
/data\.google_compute_default_account/ + } + docs: + desc: | + Default service accounts should not be used when granting access to folders as this can violate least privilege. It is recommended to use specialized service accounts instead. + + Some Google Cloud services create default service accounts when you first enable the API in a Google Cloud project. By default, these service accounts are granted the Editor role (roles/editor) on the Cloud project, which allows them to read and modify all resources in the Cloud project. This amount of access isn't essential for the services to work: To access resources in your Cloud project, Google Cloud services use service agents, not the default service accounts. + audit: | + Check if `member` is configured to use default service accounts `compute@developer.gserviceaccount.com`, `appspot.gserviceaccount.com`, or if a `data.google_compute_default_service_account` is being used + + ```hcl + resource "google_folder_iam_member" "folder-123" { + folder = "folder-123" + role = "roles/my-role" + member = "123-compute@developer.gserviceaccount.com" + } + + resource "google_folder_iam_member" "folder-456" { + folder = "folder-456" + role = "roles/my-role" + member = "123@appspot.gserviceaccount.com" + } + + data "google_compute_default_service_account" "default" { + } + + resource "google_folder_iam_member" "folder-789" { + folder = "folder-789" + role = "roles/my-role" + member = data.google_compute_default_service_account.default.id + } + ``` + remediation: | + Define a service account with least privilege for the role + + ```hcl + resource "google_service_account" "limited" { + account_id = "account123" + display_name = "account123" + } + + resource "google_folder_iam_member" "folder-123" { + folder = "folder-123" + role = "roles/my-role" + member = "serviceAccount:${google_service_account.limited.email}" + } + ``` + - uid: terraform-gcp-security-iam-no-folder-level-service-account-impersonation + title: Users should not be granted service account access at the folder level + mql: | + terraform.resources.where( nameLabel == "google_folder_iam_binding") { + arguments['role'] != /iam\.serviceAccountUser/ + } + docs: + desc: | + Users with service account access at the folder level can impersonate any service account. Instead, they should be given access to particular service accounts as required. + audit: | + Check if `role` is configured with `roles/iam.serviceAccountUser` + + ```hcl + resource "google_folder_iam_binding" "folder-123" { + folder = "folder-123" + role = "roles/iam.serviceAccountUser" + } + ``` + remediation: | + Define a custom role with least privilege + + ```hcl + resource "google_folder_iam_binding" "folder-123" { + folder = "folder-123" + role = "roles/custom-role" + } + ``` + - uid: terraform-gcp-security-iam-no-privileged-service-accounts + title: Service accounts should not have roles assigned with excessive privileges + mql: | + terraform.resources.where( nameLabel == "google_project_iam_member") { + arguments['role'] != /roles\/owner/ && + arguments['role'] != /roles\/editor/ + } + docs: + desc: | + Service accounts should have a minimal set of permissions assigned to accomplish their job. They should never have excessive access because if compromised, an attacker can escalate privileges and take over the entire account. 
+ audit: | + Check if `role` is configured with basic roles: `roles/editor`, `roles/owner` + + ```hcl + resource "google_service_account" "test" { + account_id = "account123" + display_name = "account123" + } + + resource "google_project_iam_member" "project" { + project = "your-project-id" + role = "roles/owner" + member = "serviceAccount:${google_service_account.test.email}" + } + ``` + remediation: | + Define a custom role with least privilege + + ```hcl + resource "google_service_account" "test" { + account_id = "account123" + display_name = "account123" + } + + resource "google_project_iam_member" "project" { + project = "your-project-id" + role = "roles/logging.logWriter" + member = "serviceAccount:${google_service_account.test.email}" + } + ``` + - uid: terraform-gcp-security-storage-no-public-access + title: Ensure that Cloud Storage bucket is not publicly accessible + mql: | + terraform.resources.where( nameLabel == "google_storage_bucket_iam_binding") { + attributes['members']['value'] { _ != /allUsers/ && _ != /allAuthenticatedUsers/} + } + docs: + desc: | + Google Cloud Storage buckets that define 'allUsers' or 'allAuthenticatedUsers' as members in an IAM member/binding causes data to be exposed outside of the organization. This can lead to exposure of sensitive data. The recommended approach is to restrict public access. + audit: | + Check if `members` is configured with `allAuthenticatedUsers` or `allUsers` + + ```hcl + resource "google_storage_bucket_iam_binding" "allAuthenticatedUsers" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "allAuthenticatedUsers", + ] + } + + resource "google_storage_bucket_iam_binding" "allUsers" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "allUsers", + ] + } + ``` + remediation: | + Restrict public access to the bucket. + + ```hcl + resource "google_storage_bucket_iam_binding" "binding" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] + } + ``` + - uid: terraform-gcp-security-storage-enable-ubla + title: Ensure that Cloud Storage buckets have uniform bucket-level access enabled + mql: | + terraform.resources.where( nameLabel == "google_storage_bucket") { + arguments['uniform_bucket_level_access'] == true + } + docs: + desc: | + Google Cloud Storage buckets should be configured with uniform bucket-level access. + + When you enable uniform bucket-level access on a bucket, Access Control Lists (ACLs) are disabled, and only bucket-level Identity and Access Management (IAM) permissions grant access to that bucket and the objects it contains. You revoke all access granted by object ACLs and the ability to administrate permissions using bucket ACLs. 
+ audit: | + Check if `uniform_bucket_level_access` is set to `true` + + ```hcl + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + force_destroy = true + + uniform_bucket_level_access = false + + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } + cors { + origin = ["http://image-store.com"] + method = ["GET", "HEAD", "PUT", "POST", "DELETE"] + response_header = ["*"] + max_age_seconds = 3600 + } + } + ``` + remediation: | + Configure `uniform_bucket_level_access` to `true` + + ```hcl + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + force_destroy = true + + uniform_bucket_level_access = true + + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } + cors { + origin = ["http://image-store.com"] + method = ["GET", "HEAD", "PUT", "POST", "DELETE"] + response_header = ["*"] + max_age_seconds = 3600 + } + } + ``` + - uid: terraform-gcp-security-compute-no-public-ip + title: Compute instances should not be publicly exposed to the internet + mql: | + terraform.resources.where( nameLabel == "google_compute_instance") { + blocks.where( type == "network_interface") { + blocks.where( type == "access_config") { + arguments.values.length != 0 + } + } + } + docs: + desc: | + Google Cloud compute instances that have a public IP address are exposed on the internet and are at risk to attack. + audit: | + Check if the `access_config` is empty. + + ```hcl + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + } + ``` + remediation: | + Configure compute instance without empty `access_config` + + ```hcl + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + } + } + ``` + - uid: terraform-gcp-security-compute-disk-encryption-customer-key + title: Disks should be encrypted with Customer Supplied Encryption Keys + mql: | + terraform.resources.where( nameLabel == "google_compute_disk" ) { + blocks.one( type == "disk_encryption_key") + } + terraform.resources.where( nameLabel == "google_compute_disk" && blocks.one( type == "disk_encryption_key") ) { + blocks.where( type == "disk_encryption_key") { + arguments != "" + } + } + docs: + desc: | + Google Cloud compute instances should use disk encryption using a customer-supplied encryption key. If you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key, and you do not need to provide the key to use the disk later. + audit: | + Check if `disk_encryption_key` key is defined and that the arguments are not empty strings. 
+ + ```hcl + resource "google_compute_disk" "bad_example" { + name = "test-disk" + type = "pd-ssd" + zone = "us-central1-a" + image = "debian-9-stretch-v20200805" + labels = { + environment = "dev" + } + physical_block_size_bytes = 4096 + } + ``` + remediation: | + Configure the compute disk with `disk_encryption_key` and `kms_key_self_link` defined. + ```hcl + resource "google_compute_disk" "good_example" { + name = "test-disk" + type = "pd-ssd" + zone = "us-central1-a" + image = "debian-9-stretch-v20200805" + labels = { + environment = "dev" + } + physical_block_size_bytes = 4096 + disk_encryption_key { + kms_key_self_link = "something" + } + } + ``` + - uid: terraform-gcp-security-compute-disk-encryption-required + title: Disk encryption keys should not be passed as plaintext + mql: | + terraform.resources.where( nameLabel == "google_compute_disk" && blocks.one( type == "disk_encryption_key") ) { + blocks.where( type == "disk_encryption_key") { + arguments.keys[0] != "raw_key" + } + } + docs: + desc: | + Google Cloud compute instances should use disk encryption using a customer-supplied encryption key. One of the options for the `disk_encryption_key` block is `raw_key`, which is the key in plaintext. + + Sensitive values such as raw encryption keys should not be included in your Terraform code and should be stored securely by a secrets manager. + audit: | + Check if `raw_key` is used in the `disk_encryption_key` block + + ```hcl + resource "google_compute_disk" "bad_example" { + disk_encryption_key { + raw_key="b2ggbm8gdGhpcyBpcyBiYWQ=" + } + } + ``` + remediation: | + Configure the compute disk with `disk_encryption_key` and `kms_key_self_link` defined + + ```hcl + resource "google_compute_disk" "good_example" { + disk_encryption_key { + kms_key_self_link = google_kms_crypto_key.my_crypto_key.id + } + } + ``` + - uid: terraform-gcp-security-compute-enable-shielded-vm + title: Verify shielded VM is enabled on compute instances + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" ) { + blocks.one( type == "shielded_instance_config" ) + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "shielded_instance_config" )) { + blocks.where( type == "shielded_instance_config") { + attributes['enable_vtpm'] == null || attributes['enable_vtpm']['value'] == true + } + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "shielded_instance_config" )) { + blocks.where( type == "shielded_instance_config") { + attributes['enable_integrity_monitoring'] == null || attributes['enable_integrity_monitoring']['value'] == true + } + } + docs: + desc: | + Shielded VMs are virtual machines (VMs) on Google Cloud hardened by a set of security controls that help defend against rootkits and bootkits. Using Shielded VMs helps protect enterprise workloads from threats like remote attacks, privilege escalation, and malicious insiders. Shielded VMs leverage advanced platform security capabilities such as secure and measured boot, a virtual trusted platform module (vTPM), UEFI firmware, and integrity monitoring. + + **Secure Boot** helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. + + **Integrity monitoring** helps you understand and make decisions about the state of your VM instances.
Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not, one for the early boot sequence and one for the late boot sequence. + audit: | + Check if the `shielded_instance_config` is configured on the instance, and if `enable_vtpm` and `enable_integrity_monitoring` are set to `false` + + ```hcl + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + shielded_instance_config { + enable_vtpm = false + enable_integrity_monitoring = false + } + } + ``` + remediation: | + Configure `shielded_instance_config` without `enable_vtpm` and `enable_integrity_monitoring`, or configure `enable_vtpm` and `enable_integrity_monitoring` explicitly to `true` + + ```hcl + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + shielded_instance_config { + enable_vtpm = true + enable_integrity_monitoring = true + } + } + ``` + - uid: terraform-gcp-security-compute-enable-vpc-flow-logs + title: Verify VPC flow logs enabled on compute instances + mql: | + terraform.resources.where( nameLabel == "google_compute_subnetwork" && arguments['purpose'] != "INTERNAL_HTTPS_LOAD_BALANCER" ) { + blocks.one( type == "log_config") + } + docs: + desc: | + VPC flow logs record information about all traffic, which is a vital tool in reviewing anomalous traffic. Google Compute Engine subnetworks that do not have VPC flow logs enabled have limited information for auditing and awareness. + + Note: Google Compute Engine subnets configured as INTERNAL_HTTPS_LOAD_BALANCER do not support VPC flow logs. Compute subnetworks with `purpose INTERNAL_HTTPS_LOAD_BALANCER` attribute will not be evaluated. 
+ audit: | + The following example will fail: + + ```terraform + + resource "google_compute_subnetwork" "bad_example" { + name = "test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.id + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + } + + resource "google_compute_network" "custom-test" { + name = "test-network" + auto_create_subnetworks = false + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_compute_subnetwork" "good_example" { + name = "test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.id + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + log_config { + aggregation_interval = "INTERVAL_10_MIN" + flow_sampling = 0.5 + metadata = "INCLUDE_ALL_METADATA" + } + } + + resource "google_compute_network" "custom-test" { + name = "test-network" + auto_create_subnetworks = false + } + + ``` + - uid: terraform-gcp-security-compute-no-default-service-account + title: Compute instances should not use the default service account + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "service_account") ) { + blocks.where( type == "service_account" ) { + attributes['email'] != null + } + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "service_account") ) { + blocks.where( type == "service_account" ) { + attributes['email'] != /.+-compute@developer\.gserviceaccount.com/ + } + } + docs: + desc: | + The default service account has full project access. Provisioning instances using the default service account gives the instance full access to the project. Compute instances should instead be assigned the minimal access they need. + audit: | + The following example will fail: + + ```terraform + + resource "google_compute_instance" "default" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + service_account { + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + email = "1234567890-compute@developer.gserviceaccount.com" + scopes = ["cloud-platform"] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service_account_id" + display_name = "Service Account" + } + + resource "google_compute_instance" "default" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + metadata = { + foo = "bar" + } + + metadata_startup_script = "echo hi > /test.txt" + + service_account { + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+ email = google_service_account.default.email + scopes = ["cloud-platform"] + } + } + + ``` + - uid: terraform-gcp-security-compute-no-ip-forwarding + title: Compute instances should not have IP forwarding enabled + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" && attributes['can_ip_forward']) { + attributes['can_ip_forward']['value'] == false + } + docs: + desc: | + Disabling IP forwarding ensures the instance can only receive packets addressed to the instance and can only send packets with a source address of the instance. + + The attribute `can_ip_forward` is optional on `google_compute_instance` and defaults to `false`. Instances with `can_ip_forward = true` will fail. + audit: | + The following example will fail because `can_ip_forward` is set to `true`: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + can_ip_forward = true + } + + ``` + remediation: | + The following example will pass because `can_ip_forward` is explicitly set to `false`: + + ```terraform + + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + can_ip_forward = false + } + + ``` + - uid: terraform-gcp-security-compute-no-plaintext-vm-disk-keys + title: VM disk encryption keys should not be provided in plaintext + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" ) { + blocks { arguments.keys { _ != 'disk_encryption_key_raw' } } + } + docs: + desc: | + Providing your encryption key in plaintext format means anyone with access to the source code also has access to the key. + + When encrypting a `boot_disk`, it is not recommended to use the `disk_encryption_key_raw` argument as this passes the key in plaintext, which is not secure. Consider using `kms_key_self_link` or a secrets manager instead. + audit: | + The following example will fail: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + disk_encryption_key_raw = "something" + } + + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + kms_key_self_link = "kmsKeyName" + } + + } + ``` + - uid: terraform-gcp-security-bigquery-no-public-access + title: BigQuery datasets should only be accessible within the organization + mql: | + terraform.resources.where( nameLabel == "google_bigquery_dataset" ) { + blocks { arguments.values.none("allAuthenticatedUsers") } + } + docs: + desc: | + BigQuery datasets should not be configured to provide access to `allAuthenticatedUsers` as this provides any authenticated GCP user, even those outside of your organization, access to your BigQuery dataset. This can lead to exposure of sensitive data to the public internet. + + Configure access permissions with finer granularity, following the principle of least privilege.
+ audit: | + The following example will fail: + + ```terraform + + resource "google_bigquery_dataset" "bad_example" { + dataset_id = "example_dataset" + friendly_name = "test" + description = "This is a test description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels = { + env = "default" + } + + access { + role = "OWNER" + special_group = "allAuthenticatedUsers" + } + + access { + role = "READER" + domain = "hashicorp.com" + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_bigquery_dataset" "good_example" { + dataset_id = "example_dataset" + friendly_name = "test" + description = "This is a test description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels = { + env = "default" + } + + access { + role = "OWNER" + user_by_email = google_service_account.bqowner.email + } + + access { + role = "READER" + domain = "hashicorp.com" + } + } + + resource "google_service_account" "bqowner" { + account_id = "bqowner" + } + ``` + - uid: terraform-gcp-security-dns-enable-dnssec + title: Cloud DNS should use DNSSEC + mql: | + terraform.resources.where( nameLabel == "google_dns_managed_zone" ) { + blocks.where( type == "dnssec_config" ) { + attributes['state']['value'] != "off" + } + } + docs: + desc: | + DNSSEC authenticates DNS responses, preventing MITM attacks and impersonation. Unverified DNS responses could lead to man-in-the-middle attacks. + audit: | + The following example will fail: + + ```terraform + + resource "google_dns_managed_zone" "bad_example" { + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } + dnssec_config { + state = "off" + } + } + + resource "random_id" "rnd" { + byte_length = 4 + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_dns_managed_zone" "good_example" { + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } + dnssec_config { + state = "on" + } + } + + resource "random_id" "rnd" { + byte_length = 4 + } + ``` + - uid: terraform-gcp-security-dns-no-rsa-sha1 + title: Zone signing should not use RSA SHA1 + mql: | + terraform.datasources.where( nameLabel == "google_dns_keys" ) { + blocks { attributes['algorithm']['value'] != "rsasha1" } + } + docs: + desc: | + RSA SHA1 is a weaker algorithm than SHA2-based algorithms such as RSA SHA256/512. + audit: | + The following example will fail: + + ```terraform + + resource "google_dns_managed_zone" "foo" { + name = "foobar" + dns_name = "foo.bar." + + dnssec_config { + state = "on" + non_existence = "nsec3" + } + } + + data "google_dns_keys" "foo_dns_keys" { + managed_zone = google_dns_managed_zone.foo.id + zone_signing_keys { + algorithm = "rsasha1" + } + } + + output "foo_dns_ds_record" { + description = "DS record of the foo subdomain." + value = data.google_dns_keys.foo_dns_keys.key_signing_keys[0].ds_record + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_dns_managed_zone" "foo" { + name = "foobar" + dns_name = "foo.bar." + + dnssec_config { + state = "on" + non_existence = "nsec3" + } + } + + data "google_dns_keys" "foo_dns_keys" { + managed_zone = google_dns_managed_zone.foo.id + zone_signing_keys { + algorithm = "rsasha512" + } + } + + output "foo_dns_ds_record" { + description = "DS record of the foo subdomain." 
+ value = data.google_dns_keys.foo_dns_keys.key_signing_keys[0].ds_record + } + ``` + - uid: terraform-gcp-security-gke-enable-auto-repair + title: Kubernetes should have 'Automatic repair' enabled + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "management") { + arguments['auto_repair'] != false + } + } + docs: + desc: | + Automatic repair will monitor nodes and attempt repair when a node fails multiple subsequent health checks. Failing nodes will require manual repair. + audit: | + The following example will fail: + + ```terraform + + resource "google_container_node_pool" "bad_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_repair = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "good_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_repair = true + } + } + ``` + - uid: terraform-gcp-security-gke-enable-auto-upgrade + title: Kubernetes should have 'Automatic upgrade' enabled + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "management") { + arguments['auto_upgrade'] != false + } + } + docs: + desc: | + Automatic updates keep nodes updated with the latest cluster master version. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "bad_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_upgrade = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "good_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_upgrade = true + } + } + ``` + - uid: terraform-gcp-security-gke-enable-ip-aliasing + title: Clusters should have IP aliasing enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.one( type == "ip_allocation_policy" ) + } + docs: + desc: | + IP aliasing allows the reuse of public IPs internally, removing the need for a NAT gateway. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
remove_default_node_pool = true + initial_node_count = 1 + ip_allocation_policy {} + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.good_example.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + ip_allocation_policy { + cluster_secondary_range_name = "some range name" + services_secondary_range_name = "some range name" + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.good_example.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-master-networks + title: Master authorized networks should be configured on GKE clusters + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments.keys.contains("master_authorized_networks_config") + } + docs: + desc: | + Enabling authorized networks means you can restrict master access to a fixed set of CIDR ranges. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "10.10.128.0/24" + display_name = "internal" + }] + }] + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-network-policy + title: Network Policy should be enabled on GKE clusters + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.one( type == "network_policy" ) + } + docs: + desc: | + Enabling a network policy allows the segregation of network traffic by namespace. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + network_policy { + enabled = false + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
remove_default_node_pool = true + initial_node_count = 1 + network_policy { + enabled = true + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.good_example.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-private-cluster + title: Clusters should be set to private + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.one( type == "private_cluster_config" ) + } + docs: + desc: | + Enabling private nodes on a cluster ensures the nodes are only available internally as they will only be assigned internal addresses. Private nodes are configured through the `private_cluster_config` block with `enable_private_nodes = true`. + audit: | + The following example will fail because the cluster does not define a `private_cluster_config` block: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.bad_example.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass because the `private_cluster_config` block enables private nodes: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + private_cluster_config { + enable_private_nodes = true + enable_private_endpoint = false + master_ipv4_cidr_block = "10.13.0.0/28" + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.good_example.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-stackdriver-logging + title: Stackdriver Logging should be enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes.keys.contains( "logging_service" ) + } + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['logging_service']['value'] == 'logging.googleapis.com/kubernetes' + } + docs: + desc: | + StackDriver logging provides a useful interface to all of stdout/stderr for each container and should be enabled for monitoring, debugging, etc. Without Stackdriver, visibility to the cluster will be reduced. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + logging_service = "logging.googleapis.com" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + logging_service = "logging.googleapis.com/kubernetes" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-stackdriver-monitoring + title: Stackdriver Monitoring should be enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes.keys.contains( "monitoring_service" ) + } + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['monitoring_service']['value'] == 'monitoring.googleapis.com/kubernetes' + } + docs: + desc: | + StackDriver monitoring aggregates logs, events, and metrics from your Kubernetes environment on GKE to help you understand your application's behavior in production. 
+ audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + monitoring_service = "monitoring.googleapis.com" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + monitoring_service = "monitoring.googleapis.com/kubernetes" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-metadata-endpoints-disabled + title: Legacy metadata endpoints enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['metadata']['value']['disable-legacy-endpoints'] != false + } + docs: + desc: | + The Compute Engine instance metadata server exposes legacy v0.1 and v1beta1 endpoints, which do not enforce metadata query headers. This is a feature in the v1 APIs that makes it more difficult for a potential attacker to retrieve instance metadata. Unless specifically required, we recommend you disable these legacy APIs. When setting the `metadata` block, the default value for `disable-legacy-endpoints` is set to `true`, they should not be explicitly enabled. 
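As background, the simplified examples below omit the surrounding resource configuration; in the Terraform google provider, the `metadata` map that carries `disable-legacy-endpoints` typically sits under `node_config`. A minimal, illustrative sketch with assumed resource names:

```terraform
# Illustrative sketch only: disable-legacy-endpoints is usually set through the
# node_config metadata map of the cluster (or node pool) resource.
resource "google_container_cluster" "example" {
  name               = "my-gke-cluster"
  location           = "us-central1"
  initial_node_count = 1

  node_config {
    metadata = {
      disable-legacy-endpoints = "true"
    }
  }
}
```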
+ audit: | + The following example will fail: + + ```terraform + + resource "google_container_cluster" "bad_example" { + metadata { + disable-legacy-endpoints = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_container_cluster" "good_example" { + metadata { + disable-legacy-endpoints = true + } + } + ``` + - uid: terraform-gcp-security-gke-no-client-cert-authentication + title: Clusters should not use client certificates for authentication + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.where( type == "master_auth" ) { + blocks { attributes['issue_client_certificate']['value'] != true } + } + } + docs: + desc: | + There are several methods of authenticating to the Kubernetes API server. In GKE, the supported methods are service account bearer tokens, OAuth tokens, and x509 client certificates. Prior to GKE's integration with OAuth, a one-time generated x509 certificate or static password were the only available authentication methods, but are now not recommended and should be disabled. These methods present a wider surface of attack for cluster compromise and have been disabled by default since GKE version 1.12. If you are using legacy authentication methods, we recommend that you turn them off. Authentication with a static password is deprecated and has been removed since GKE version 1.19. + + Existing clusters should move to OAuth. + audit: | + The following example will fail due to the `master_auth` block that includes the `issue_client_certificate = true` configuration which is set to `false` by default: + + ```terraform + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + master_auth { + client_certificate_config { + issue_client_certificate = true + } + } + } + ``` + remediation: | + The following example will pass since the `master_auth` block is not specified and secure defaults are used instead: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + ``` + The following example will pass because the `master_auth` block is explicitly configuring `issue_client_certificate = false`: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
remove_default_node_pool = true + initial_node_count = 1 + + master_auth { + client_certificate_config { + issue_client_certificate = false + } + } + } + ``` + - uid: terraform-gcp-security-gke-no-basic-authentication + title: Clusters should not use basic authentication + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.where( type == "master_auth" ) { attributes['username']['value'] == null || attributes['username']['value'] == "" } + } + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.where( type == "master_auth" ) { attributes['password']['value'] == null || attributes['password']['value'] == "" } + } + docs: + desc: | + There are several methods of authenticating to the Kubernetes API server. In GKE, the supported methods are service account bearer tokens, OAuth tokens, and x509 client certificates. Prior to GKE's integration with OAuth, a one-time generated x509 certificate or static password were the only available authentication methods, but are now not recommended and should be disabled. These methods present a wider surface of attack for cluster compromise and have been disabled by default since GKE version 1.12. If you are using legacy authentication methods, we recommend that you turn them off. Authentication with a static password is deprecated and has been removed since GKE version 1.19. + + Existing clusters should move to OAuth. + audit: | + The following example will fail because the `master_auth` block sets `username` and `password` to values other than `""`: + + ```terraform + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + master_auth { + username = "kubeadmin" + password = var.cluster_password + } + } + ``` + remediation: | + The following example will pass since the `master_auth` block is not specified and secure defaults are used instead: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + ``` + + The following example will pass because the `master_auth` block explicitly disables basic authentication: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + + master_auth { + username = "" + password = "" + client_certificate_config { + issue_client_certificate = false + } + } + } + ``` + - uid: terraform-gcp-security-gke-no-public-control-plane + title: GKE Control Plane should not be publicly accessible + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments['master_authorized_networks_config'][0]['cidr_blocks'] { _['cidr_block'] != "0.0.0.0/0" } + } + docs: + desc: | + Authorized networks allow you to specify CIDR ranges and allow IP addresses in those ranges to access your cluster control plane endpoint using HTTPS.
Exposing the Kubernetes control plane to the public internet by specifying a CIDR block of "0.0.0.0/0" is not recommended. Public clusters can have up to 50 authorized network CIDR ranges; private clusters can have up to 100. + audit: | + The following example will fail due to the `master_authorized_networks_config` block that specifies `cidr_block = "0.0.0.0/0"` which is publicly accessible: + + ```terraform + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "0.0.0.0/0" + display_name = "external" + }] + }] + } + ``` + remediation: | + The following example will pass since the `master_authorized_networks_config` block configures an internal `cidr_block`: + + ```terraform + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "10.10.128.0/24" + display_name = "internal" + }] + }] + } + + ``` + - uid: terraform-gcp-security-gke-node-metadata-security + title: Node metadata value disables metadata concealment + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config") { + blocks { attributes['node_metadata']['value'] != "EXPOSE" } + } + } + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config") { + blocks { attributes['node_metadata']['value'] != "UNSPECIFIED" } + } + } + docs: + desc: | + GKE metadata concealment protects some potentially sensitive system metadata from user workloads running on your cluster. Metadata concealment is scheduled to be deprecated in the future and Google recommends using Workload Identity instead of metadata concealment. This check is looking for configuration that exposes metadata completely. 
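Because Google recommends Workload Identity over metadata concealment, a minimal sketch of a Workload Identity configuration is included here for reference. The resource names and the `workload_pool` value are illustrative; `workload_pool` and `mode` are the argument names used by recent google provider versions:

```terraform
# Illustrative sketch only: Workload Identity replaces metadata concealment.
resource "google_container_cluster" "example" {
  name     = "my-gke-cluster"
  location = "us-central1"

  remove_default_node_pool = true
  initial_node_count       = 1

  workload_identity_config {
    workload_pool = "my-project-id.svc.id.goog"
  }
}

resource "google_container_node_pool" "example" {
  name       = "my-node-pool"
  cluster    = google_container_cluster.example.id
  node_count = 1

  node_config {
    # Pods reach the GKE metadata server instead of the node's instance metadata.
    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }
}
```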
+ audit: | + The following example will fail due to the `node_config` block that specifies `node_metadata = "EXPOSE"`: + + ```terraform + + resource "google_container_node_pool" "bad_example" { + node_config { + workload_metadata_config { + node_metadata = "EXPOSE" + } + } + } + ``` + + The following example will fail due to the `node_config` block that specifies `node_metadata = "UNSPECIFIED"`: + + ```terraform + + resource "google_container_node_pool" "bad_example" { + node_config { + workload_metadata_config { + node_metadata = "UNSPECIFIED" + } + } + } + ``` + remediation: | + The following example will pass due to the `node_config` block that specifies `node_metadata = "GKE_METADATA_SERVER"` (recommended): + + ```terraform + + resource "google_container_node_pool" "good_example" { + node_config { + workload_metadata_config { + node_metadata = "GKE_METADATA_SERVER" + } + } + } + + ``` + + The following example will pass due to the `node_config` block that specifies `node_metadata = "SECURE"`: + + ```terraform + + resource "google_container_node_pool" "good_example" { + node_config { + workload_metadata_config { + node_metadata = "SECURE" + } + } + } + + ``` + - uid: terraform-gcp-security-gke-node-pool-uses-cos + title: Ensure Container-Optimized OS (COS) is used for Kubernetes Engine cluster node images + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config") { + attributes['image_type']['value'] == 'COS_CONTAINERD' + } + } + docs: + desc: | + GKE supports several OS image types, but COS_CONTAINERD is the recommended OS image to use on cluster nodes for enhanced security. + audit: | + The following example will fail due to the `node_config` block that specifies `image_type = "something"`: + + ```terraform + + resource "google_container_node_pool" "bad_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + image_type = "something" + } + } + ``` + remediation: | + The following example will pass due to the `node_config` block that specifies `image_type = "COS_CONTAINERD"` (recommended): + + ```terraform + + resource "google_container_node_pool" "good_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + image_type = "COS_CONTAINERD" + } + } + + ``` + - uid: terraform-gcp-security-gke-node-shielding-enabled + title: Shielded GKE nodes should be enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments['enable_shielded_nodes'] != false + } + docs: + desc: | + Node identity and integrity can't be verified without shielded GKE nodes. CIS GKE Benchmark Recommendation: 6.5.5.
Shielded GKE Nodes provide strong, verifiable node identity and integrity to increase the security of GKE nodes and should be enabled on all GKE clusters. + + The `enable_shielded_nodes` argument is optional and set to `true` by default; it should not be set to `false`. + audit: | + The following example will fail because `enable_shielded_nodes` is set to `false`: + + ```terraform + + resource "google_container_cluster" "bad_example" { + enable_shielded_nodes = false + } + ``` + remediation: | + The following example will pass because `enable_shielded_nodes` is set to `true`: + + ```terraform + + resource "google_container_cluster" "good_example" { + enable_shielded_nodes = true + } + + ``` + - uid: terraform-gcp-security-gke-use-cluster-labels + title: Clusters should be configured with labels + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments.keys.contains( "resource_labels" ) + } + docs: + desc: | + Cluster labels are key-value pairs that help you organize your Google Cloud clusters. You can attach a label to each resource, then filter the resources based on their labels. Information about labels is forwarded to the billing system, so you can break down your billed charges by label. + + The `resource_labels` argument is optional when using the `google_container_cluster` resource. + audit: | + The following example will fail because the `resource_labels` argument is not defined for the cluster: + + ```terraform + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + } + + ``` + remediation: | + The following example will pass because the `resource_labels` argument is defined for the cluster: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + resource_labels = { + "env" = "staging" + } + } + + ``` + - uid: terraform-gcp-security-gke-use-rbac-permissions + title: Legacy ABAC permissions should be disabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments['enable_legacy_abac'] != true + } + docs: + desc: | + By default, ABAC is disabled for clusters created using GKE version 1.8 and later. In Kubernetes, RBAC is used to grant permissions to resources at the cluster and namespace level. RBAC allows you to define roles with rules containing a set of permissions. RBAC has significant security advantages over ABAC. + + The `enable_legacy_abac` argument is set to `false` by default.
+ audit: | + The following example will fail because the `enable_legacy_abac` argument is set to `true`: + + ```terraform + + resource "google_container_cluster" "bad_example" { + enable_legacy_abac = true + } + + ``` + remediation: | + The following example will pass because the `enable_legacy_abac` argument is explicitly set to `false` (omitting the argument will also pass): + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + enable_legacy_abac = false + } + + ``` + - uid: terraform-gcp-security-gke-use-service-account + title: A service account should be defined for GKE nodes + mql: |- + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.where( type == "node_config" ) { + arguments.keys.contains("service_account") + } + } + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config" ) { + arguments.keys.contains("service_account") + } + } + docs: + desc: | + Each GKE node has an Identity and Access Management (IAM) Service Account associated with it. By default, nodes are given the Compute Engine default service account, which you can find by navigating to the IAM section of the Cloud Console. This account has broad access by default, making it useful to a wide variety of applications, but it has more permissions than are required to run your Kubernetes Engine cluster. You should create and use a minimally privileged service account for your nodes instead of the Compute Engine default service account. + audit: | + The following example will fail because the `node_config` block does not contain a `service_account` argument: + + ```terraform + + resource "google_container_cluster" "bad_example" { + name = "marcellus-wallace" + location = "us-central1-a" + initial_node_count = 3 + + node_config { + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + } + timeouts { + create = "30m" + update = "40m" + } + } + + ``` + remediation: | + The following example will pass because the `node_config` block contains a `service_account` argument: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "marcellus-wallace" + location = "us-central1-a" + initial_node_count = 3 + + node_config { + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + } + timeouts { + create = "30m" + update = "40m" + } + } + + ``` diff --git a/test/testdata/terraform/aws-3.xx/fail/aws-api-gw.tf b/test/testdata/terraform/aws-3.xx/fail/aws-api-gw.tf new file mode 100644 index 00000000..2e5007eb --- /dev/null +++ b/test/testdata/terraform/aws-3.xx/fail/aws-api-gw.tf @@ -0,0 +1,33 @@ +resource "aws_api_gateway_method_settings" "bad_example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" + settings { + metrics_enabled = true + logging_level = "INFO" + cache_data_encrypted = false + } +} + +resource "aws_apigatewayv2_stage" "fail_example" { + api_id = aws_apigatewayv2_api.example.id + name = "example-stage" +} + +resource "aws_api_gateway_stage" "fail_example" { + stage_name = "production" + rest_api_id = aws_api_gateway_rest_api.test.id + deployment_id = aws_api_gateway_deployment.test.id + xray_tracing_enabled = false +} + +resource "aws_api_gateway_method" "fail_example" { + rest_api_id = aws_api_gateway_rest_api.SampleAPI.id + resource_id = aws_api_gateway_resource.SampleResource.id + http_method = "GET" + authorization = "NONE" +} + +resource "aws_api_gateway_domain_name" "fail_example" { + security_policy = "TLS_1_0" +} \ No newline at end of file diff --git a/test/testdata/terraform/aws-3.xx/fail/aws-ec2.tf b/test/testdata/terraform/aws-3.xx/fail/aws-ec2.tf new file mode 100644 index 00000000..ce51e4af --- /dev/null +++ b/test/testdata/terraform/aws-3.xx/fail/aws-ec2.tf @@ -0,0 +1,24 @@ +resource "aws_ebs_volume" "fail_example" { + availability_zone = "us-east-1" + size = 40 + tags = { + Name = "Not Encrypted" + } + encrypted = false +} + +resource "aws_instance" "fail_example" { + ami = "ami-0279c3b3186e54acd" + instance_type = "t2.micro" +} + +resource "aws_instance" "fail_example" { + ami = "ami-0279c3b3186e54acd" + instance_type = "t2.micro" + # we explicitly do not use the sample account + user_data = <