Helm chart update: 2.30.0
helm authored and helm committed Jun 6, 2024
1 parent 57a5740 commit 02e0f2d
Showing 8 changed files with 196 additions and 95 deletions.
5 changes: 3 additions & 2 deletions checkpoint/cloudguard/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.29.0
appVersion: 2.30.0
description: A Helm chart for Check Point CloudGuard Workload Security
home: https://portal.checkpoint.com
icon: https://www.checkpoint.com/wp-content/uploads/icon-cloudguard-nav.png
@@ -28,10 +28,11 @@ keywords:
- ecs
- tanzu
- k3s
- rke2
- openshift
- eks
- aks
- gke
- autopilot
name: cloudguard
version: 2.29.0
version: 2.30.0
2 changes: 1 addition & 1 deletion checkpoint/cloudguard/README.md
@@ -136,7 +136,7 @@ The following table list the configurable parameters of this chart and their def
| `proxy` | Proxy settings (e.g. http://my-proxy.com:8080) | `{}` |
| `containerRuntime` | Container runtime (docker/containerd/cri-o) overriding auto-detection | `` |
| `containerRuntimeSocket` | Container runtime socket path overriding auto-detection | `` |
| `platform` | Kubernetes platform (kubernetes/ tanzu/ openshift/ openshift.v3/ eks/ eks.bottlerocket/ gke.cos/ gke.autopilot/ k3s/ kubernetes.coreos) overriding auto-detection | `kubernetes` |
| `platform` | Kubernetes platform (kubernetes/ tanzu/ openshift/ openshift.v3/ eks/ eks.bottlerocket/ gke.cos/ gke.autopilot/ k3s/ rke2/ kubernetes.coreos) overriding auto-detection | `kubernetes` |
| `seccompProfile` | Computer Security facility profile. (to be used in kubernetes 1.19 and up) | `RuntimeDefault` |
| `podAnnotations.seccomp` | Computer Security facility profile. (to be used in kubernetes below 1.19) | `runtime/default` |
| `podAnnotations.apparmor` | Apparmor Linux kernel security module profile. | `{}` |
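With rke2 added to the supported platform list, the override documented in the table above can be set explicitly whenever auto-detection is not possible. A minimal sketch, assuming a release named cloudguard installed from the chart directory in this repository; other required values (credentials and so on) are omitted for brevity:

# Explicitly select the new rke2 platform and a container runtime instead of relying on auto-detection
helm install cloudguard ./checkpoint/cloudguard \
  --namespace checkpoint --create-namespace \
  --set platform=rke2 \
  --set containerRuntime=containerd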
12 changes: 6 additions & 6 deletions checkpoint/cloudguard/defaults.yaml
@@ -51,7 +51,7 @@ podAnnotations:
proxy: {}

containerRuntime:
platform: kubernetes # kubernetes, openshift, openshift.v3, tanzu, eks, eks.bottlerocket, gke.cos, gke.autopilot or k3s
platform: kubernetes # kubernetes, openshift, openshift.v3, tanzu, eks, eks.bottlerocket, gke.cos, gke.autopilot, k3s or rke2

seccompProfile:
type: RuntimeDefault
@@ -112,7 +112,7 @@ addons:
priorityClassName: "system-node-critical"
## Specify image and tag
image: checkpoint/consec-imagescan-daemon
tag: 2.29.0
tag: 2.30.0

## Specify existing service account name ("" to create)
serviceAccountName: ""
@@ -134,7 +134,7 @@ addons:
shim:
## Specify image and tag
image: checkpoint/consec-imagescan-shim
tag: 2.29.0
tag: 2.30.0

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
@@ -170,7 +170,7 @@ addons:
engine:
## Specify image and tag
image: checkpoint/consec-imagescan-engine
tag: 2.29.0
tag: 2.30.0

## Specify existing service account name ("" to create)
serviceAccountName: ""
@@ -203,7 +203,7 @@ addons:
list:
## Specify image and tag
image: checkpoint/consec-imagescan-engine
tag: 2.29.0
tag: 2.30.0

## Specify existing service account name ("" to create)
serviceAccountName: ""
@@ -346,7 +346,7 @@ addons:
enforcer:
## Specify image and tag
image: checkpoint/consec-admission-enforcer
tag: 2.11.0
tag: 2.12.0

## Specify existing service account name ("" to create)
serviceAccountName: ""
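The image tag bumps above track the chart's appVersion, but individual addon images can also be pinned at upgrade time. A minimal sketch; the value path below (addons.imageScan.daemon.tag) is hypothetical, since the intermediate keys are collapsed in this diff, and the authoritative paths are in defaults.yaml:

# Pin a single addon image tag; the value path here is an illustrative guess, not taken from the diff
helm upgrade --install cloudguard ./checkpoint/cloudguard \
  --reuse-values \
  --set-string addons.imageScan.daemon.tag=2.30.0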
112 changes: 81 additions & 31 deletions checkpoint/cloudguard/templates/_helpers.tpl
@@ -76,13 +76,16 @@

{{- /* Labels commonly used in our k8s resources */ -}}
{{- define "common.labels" -}}
app.kubernetes.io/name: {{ template "agent.resource.name" . }}
app.kubernetes.io/name: {{ include "agent.resource.name" . }}
app.kubernetes.io/instance: {{ include "name.prefix" . }}
{{- end -}}

{{- /* Labels commonly used in our "pod group" resources */ -}}
{{- define "common.labels.with.chart" -}}
helm.sh/chart: {{ printf "%s-%s" .Chart.name .Chart.version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
app.kubernetes.io/version: {{ $.Chart.appVersion }}
app.created.by.template: {{ (include "is.helm.template.command" .) | quote }}
{{ template "common.labels" . }}
{{- end -}}

@@ -258,13 +261,13 @@ key: {{ $cert.Key | b64enc }}
{{- printf "{\"auths\":{\"%s\":{\"auth\":\"%s\"}}}" .Values.imageRegistry.url (printf "%s:%s" $user $pass | b64enc) | b64enc -}}
{{- end -}}

{{- /* validate containerRuntime is one of the allowed values.
{{- /* validate containerRuntime is one of the supported values.
takes a context (such as $config, .Values or (dict "containerRuntime" $containerRuntime)) that has a .containerRuntime field */ -}}
{{- define "validate.container.runtime" -}}
{{- $allowedRuntimes := list "docker" "containerd" "cri-o" -}}
{{- if has (.containerRuntime | lower) $allowedRuntimes -}}
{{- $supportedRuntimes := (include "supported.containerRuntimes" .) | splitList " " -}}
{{- if has (.containerRuntime | lower) $supportedRuntimes -}}
{{- else -}}
{{- $err := printf "\n\nERROR: Invalid containerRuntime: %s (should be one of: %s)" .containerRuntime $allowedRuntimes -}}
{{- $err := printf "\n\nERROR: Invalid containerRuntime: %s (should be one of: %s)" .containerRuntime $supportedRuntimes -}}
{{- fail $err -}}
{{- end -}}
{{- end -}}
@@ -286,27 +289,40 @@ takes a context (such as $config, .Values or (dict "containerRuntime" $container
{{- end -}}
{{- /* get the first node from the cluster */ -}}
{{- define "get.first.node" -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- if empty $nodes -}}
{{- else if eq (len $nodes.items) 0 -}}
{{- else -}}
{{- first $nodes.items | toYaml -}}
{{- end -}}
{{- end -}}
{{- define "get.container.runtime" -}}
{{- if .Values.containerRuntime -}}
{{- include "validate.container.runtime" .Values -}}
{{ .Values.containerRuntime | lower }}
{{- else -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- if ne (len $nodes) 0 -}}
{{/* examples for runtime version: docker://19.3.3, containerd://1.3.3, cri-o://1.20.3 */}}
{{- $containerRuntimeVersion := (first $nodes.items).status.nodeInfo.containerRuntimeVersion -}}
{{- $containerRuntime := first (regexSplit ":" $containerRuntimeVersion -1) -}}
{{- include "validate.container.runtime" (dict "containerRuntime" $containerRuntime) -}}
{{- if .Values.containerRuntime -}}
{{- include "validate.container.runtime" .Values -}}
{{ .Values.containerRuntime | lower }}
{{- else -}}
{{- $noRuntimeErr := "\n\nERROR: No nodes found, cannot identify container runtime. Use '--set containerRuntime=docker' or '--set containerRuntime=containerd' or '--set containerRuntime=cri-o'" -}}
{{- $firstNode := include "get.first.node" . | fromYaml -}}
{{- if empty $firstNode -}}
{{- fail $noRuntimeErr -}}
{{- end -}}
{{/* examples for runtime version: docker://19.3.3, containerd://1.3.3, cri-o://1.20.3 */}}
{{- $containerRuntimeVersion := $firstNode.status.nodeInfo.containerRuntimeVersion -}}
{{- $containerRuntime := first (regexSplit ":" $containerRuntimeVersion -1) -}}
{{- include "validate.container.runtime" (dict "containerRuntime" $containerRuntime) -}}
{{ $containerRuntime | lower }}
{{- else -}}
{{- fail "\n\nERROR: No nodes found, cannot identify container runtime. Use '--set containerRuntime=docker' or '--set containerRuntime=containerd' or '--set containerRuntime=cri-o'" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /* get platform value, if not provided, try to infer it from the first node */ -}}
{{- define "get.platform" -}}
{{- /* use platform value if it's a helm template command or when the provided value is not the default kubernetes */ -}}
{{- if or (include "is.helm.template.command" .) (and .Values.platform (ne .Values.platform "kubernetes")) -}}
{{- if or (eq (include "is.helm.template.command" .) "true") (and .Values.platform (ne .Values.platform "kubernetes")) -}}
{{- include "validate.platform" .Values -}}
{{- lower .Values.platform -}}
{{- else if has "config.openshift.io/v1" .Capabilities.APIVersions -}}
@@ -318,14 +334,18 @@ takes a context (such as $config, .Values or (dict "containerRuntime" $container
{{/* else if has "auto.gke.io/v1" .Capabilities.APIVersions */}}
{{/* printf "gke.autopilot" */}}
{{- else -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- $supportedPlatforms := (include "supported.platforms" .) | splitList " " -}}
{{- $noPlatformErr := printf "\n\nERROR: No nodes found, cannot identify platform. Append '--set platform=<platform>', platform should be one of %s" $supportedPlatforms -}}
{{- $firstNode := include "get.first.node" . | fromYaml -}}
{{- if empty $firstNode -}}
{{- fail $noPlatformErr -}}
{{- end -}}
{{- $osImage := $firstNode.status.nodeInfo.osImage -}}
{{/*
nodeInfo.osImage example values:
- "Bottlerocket OS 1.7.2 (aws-k8s-1.21)"
- "Container-Optimized OS from Google"
*/}}
{{- $firstNode := (first $nodes.items) -}}
{{- $osImage := $firstNode.status.nodeInfo.osImage -}}
{{- if contains "Bottlerocket" $osImage -}}
{{- printf "eks.bottlerocket" -}}
{{- else if contains "Container-Optimized" $osImage -}}
Expand All @@ -334,6 +354,8 @@ takes a context (such as $config, .Values or (dict "containerRuntime" $container
{{- printf "kubernetes.coreos" -}}
{{- else if hasKey $firstNode.metadata.annotations "k3s.io/hostname" -}}
{{- printf "k3s" -}}
{{- else if hasKey $firstNode.metadata.annotations "rke2.io/hostname" -}}
{{- printf "rke2" -}}
{{- else if or (hasKey $firstNode.metadata.labels "eks.amazonaws.com/nodegroup") (hasKey $firstNode.metadata.labels "alpha.eksctl.io/nodegroup-name") (hasKey $firstNode.metadata.labels "eks.amazonaws.com/compute-type") -}}
{{- printf "eks" -}}
{{- else -}}
@@ -390,16 +412,19 @@ If a user opts for the default "preserve" option:
{{- end -}}
{{- end -}}
{{- /*
used to know if we run from template (which means we have no connection to the cluster and cannot check Capabilities/nodes etc.)
if there is no namespace, we are probably running template
returns string value "true" or "false"
usage:
`{{- if eq (include "is.helm.template.command") "true" -}}`
*/ -}}
{{- define "is.helm.template.command" -}}
{{- if not (hasKey .Values "isHelmTemplateCache") -}}
{{- $namespace := lookup "v1" "Namespace" "" "" -}}
{{- if eq (len $namespace) 0 -}}
true
{{- $_ := set .Values "isHelmTemplateCache" (eq (len $namespace) 0) -}}
{{- end -}}
{{- .Values.isHelmTemplateCache | toYaml -}}
{{- end -}}
{{- define "containerd.sock.path" -}}
@@ -412,20 +437,28 @@
{{ printf (.Values.containerRuntimeSocket | toString) }}
{{- else if eq .platform "eks.bottlerocket" -}}
{{- printf "/run/dockershim.sock" -}}
{{- else if eq .platform "k3s" -}}
{{- else if has .platform (list "k3s" "rke2") -}}
{{- printf "/run/k3s/containerd/containerd.sock" -}}
{{- else -}}
{{- printf "/run/containerd/containerd.sock" -}}
{{- end -}}
{{- end -}}
{{- /* validate platform is one of the allowed values.
{{- define "containerd.runtime.v2.task" -}}
{{- if has .platform (list "k3s" "rke2") -}}
{{- printf "/run/k3s/containerd/io.containerd.runtime.v2.task" -}}
{{- else -}}
{{- printf "/run/containerd/io.containerd.runtime.v2.task" -}}
{{- end -}}
{{- end -}}
{{- /* validate platform is one of the supported values.
takes a context (such as $config or .Values) that has a .platform field */ -}}
{{- define "validate.platform" -}}
{{- $allowedPlatforms := list "kubernetes" "tanzu" "openshift" "openshift.v3" "eks" "eks.bottlerocket" "gke.cos" "gke.autopilot" "k3s" "kubernetes.coreos" -}}
{{- if has (.platform | lower) $allowedPlatforms -}}
{{- $supportedPlatforms := (include "supported.platforms" .) | splitList " " -}}
{{- if has (.platform | lower) $supportedPlatforms -}}
{{- else -}}
{{- $err := printf "\n\nERROR: Invalid platform: %s (should be one of: %s)" .platform $allowedPlatforms -}}
{{- $err := printf "\n\nERROR: Invalid platform: %s, should be one of: %s" .platform $supportedPlatforms -}}
{{- fail $err -}}
{{- end -}}
{{- end -}}
@@ -517,3 +550,20 @@ nodeAffinity:
- {{$labelValue}}
{{- end -}}
{{- end -}}
{{- /* list of supported platforms
usage:
`{{- $supportedPlatforms := (include "supported.platforms" .) | splitList " " -}}`
*/ -}}
{{- define "supported.platforms" -}}
kubernetes kubernetes.coreos tanzu openshift openshift.v3 eks eks.bottlerocket gke.cos gke.autopilot k3s rke2
{{- end -}}
{{- /* list of supported container runtimes
usage:
`{{- $supportedRuntimes := (include "supported.containerRuntimes" .) | splitList " " -}}`
*/ -}}
{{- define "supported.containerRuntimes" -}}
docker containerd cri-o
{{- end -}}
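The new supported.platforms and supported.containerRuntimes helpers centralize the allowed values, and validate.platform now builds its error message from that list. One hedged way to exercise the validation locally is a helm template run (release name and chart path are illustrative, and a full render may still need other required values):

# A value outside supported.platforms should abort with the "Invalid platform" error defined in validate.platform
helm template cloudguard ./checkpoint/cloudguard --set platform=not-a-platform

# A supported value renders; is.helm.template.command is true here, so get.platform
# uses the provided value rather than looking up cluster nodes
helm template cloudguard ./checkpoint/cloudguard --set platform=rke2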
16 changes: 12 additions & 4 deletions checkpoint/cloudguard/templates/runtime/daemon/daemonset.yaml
@@ -31,21 +31,29 @@ spec:
{{- end }}
initContainers:
{{- if and $config.featureConfig.enableFileReputation (eq $config.containerRuntime "cri-o") }}
# cri-o folder which contains containers root fs is 'private' mount.
# cri-o folder which contains containers root fs can be mounted privately.
# it depends on variable skip_mount_home in /etc/containers/storage.conf [storage.options.overlay]
# if skip_mount_home==false, private mount will be created.
# So nested/sub mounts will not be propagated to WorkloadSecurity container.
# The following container changes mount type to 'shared' on host in a hacky way:
# Literally container escapes its mount's isolation by changing mount namespace
# to 'init' process namespace, then changes the type of the required mount to 'shared'
- name: criofsfix
image: docker.io/library/alpine:3.16
image: docker.io/library/busybox:1
imagePullPolicy: IfNotPresent
command: [ "/bin/sh", "-c", "--" ]
securityContext:
privileged: true
{{- if (eq $config.platform "kubernetes.coreos") }}
runAsUser: 0
{{- end }}
args: [ "nsenter --mount=/proc/1/ns/mnt -- mount --make-shared /var/lib/containers/storage/overlay" ]
args:
- |
# check that /var/lib/containers/storage/overlay is a mount point
if [ $(nsenter --mount=/proc/1/ns/mnt -- grep -Fc ' /var/lib/containers/storage/overlay ' /proc/self/mountinfo) -gt 0 ];
then
nsenter --mount=/proc/1/ns/mnt -- mount --make-shared /var/lib/containers/storage/overlay;
fi
{{- end }}
# probe (sysdig)
- {{ $containerConfig := merge $config (dict "containerName" "probe") -}}
@@ -323,7 +331,7 @@ spec:
type: Socket
- name: runcontainerdfs-vol
hostPath:
path: /run/containerd/io.containerd.runtime.v2.task # TODO deal with other flavors like bottlerocket
path: {{ include "containerd.runtime.v2.task" $config }}
{{- end }}
{{- if eq $config.containerRuntime "cri-o" }}
- name: crio-sock
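The reworked criofsfix init container above now remounts /var/lib/containers/storage/overlay as shared only when it is actually a mount point. That condition can be spot-checked directly on a cri-o node; a minimal sketch, assuming shell access to the node and that util-linux findmnt is available (the chart itself does not use it):

# No output: the overlay directory is not a separate mount point, so there is nothing to remount
# PROPAGATION=private: the case the init container fixes via nsenter + mount --make-shared
# PROPAGATION=shared: nested container-rootfs mounts already propagate into the agent pod
findmnt -o TARGET,PROPAGATION /var/lib/containers/storage/overlay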
@@ -62,7 +62,7 @@ spec:
{{- else if eq $config.containerRuntime "containerd" }}
- pathPrefix: {{ include "containerd.sock.path" $config }}
readOnly: true
- pathPrefix: /run/containerd/io.containerd.runtime.v2.task
- pathPrefix: {{ include "containerd.runtime.v2.task" $config }}
readOnly: true
{{- else if eq $config.containerRuntime "cri-o" }}
- pathPrefix: /var/run/crio/crio.sock
Binary file added repository/cloudguard-2.30.0.tgz