test(integration): Added custom pod label tests for global configuration attributes #3795

Merged 1 commit on Jul 15, 2024
1 change: 1 addition & 0 deletions .changelog/3795.changed.txt
@@ -0,0 +1 @@
test: Added custom pod label tests for global configuration attributes
2 changes: 2 additions & 0 deletions deploy/helm/sumologic/README.md
@@ -274,6 +274,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an
| `prometheus-windows-exporter.enabled` | Set it to `true` to enable Prometheus Windows Exporter. It will gather metrics from Windows nodes. This is an experimental feature and may be subject to breaking changes. | `false` |
| `prometheus-windows-exporter` | Configuration for Prometheus Windows Exporter. [See external documentation.](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter#configuring) | See [values.yaml] |
| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` |
| `falco.podLabels` | Used to set podLabels for Falco pods. | `{}` |
| `falco.fullnameOverride` | Used to override the chart's full name. | `Nil` |
| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` |
| `falco.extra.initContainers` | InitContainers for Falco pod | See [values.yaml] |
@@ -329,6 +330,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an
| `instrumentation.instrumentationJobImage.image.tag` | Name of the image tag used to apply Instrumentation resource | `2.24.0` |
| `opentelemetry-operator.admissionWebhooks` | Admission webhooks make sure only requests with correctly formatted rules will get into the Operator. They also enable the sidecar injection for OpenTelemetryCollector and Instrumentation CR's. | See [values.yaml] |
| `opentelemetry-operator.manager.env` | Additional environment variables for opentelemetry-operator helm chart. | `{"ENABLE_WEBHOOKS": "true"}` |
| `opentelemetry-operator.manager.podLabels` | Used to set podLabels for OpenTelemetry-Operator Manager. | `{}` |
| `opentelemetry-operator.kubeRBACProxy.image.repository` | Container repository for Kube RBAC Proxy. | `public.ecr.aws/sumologic/kube-rbac-proxy` |
| `opentelemetry-operator.testFramework.image.repository` | The default operator image repository for OpenTelemetry test framework. | `public.ecr.aws/sumologic/busybox` |
| `otelcolInstrumentation.enabled` | Enables Sumo Otel Distro Collector StatefulSet to collect telemetry data. [See docs for more information.](/docs/opentelemetry-collector/traces.md) | `true` |
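Both new keys default to `{}`, so no extra labels are applied unless set. For context, a user-supplied values file setting them would look roughly like the sketch below; the `team: alpha` label is a placeholder for illustration, not a chart default.

falco:
  enabled: true
  podLabels:
    team: alpha

opentelemetry-operator:
  manager:
    podLabels:
      team: alpha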
3 changes: 3 additions & 0 deletions deploy/helm/sumologic/values.yaml
@@ -2351,6 +2351,8 @@ telegraf-operator:
falco:
enabled: false

podLabels: {}

## Put here the new name if you want to override the full name used for Falco components.
# fullnameOverride: ""

@@ -2505,6 +2507,7 @@ opentelemetry-operator:
requests:
cpu: 150m
memory: 256Mi
podLabels: {}

kubeRBACProxy:
image:
96 changes: 85 additions & 11 deletions tests/helm/common_test.go
@@ -110,6 +110,7 @@ func TestOtelImageFIPSSuffix(t *testing.T) {
for _, renderedObject := range renderedObjects {
podSpec, err := GetPodSpec(renderedObject)
require.NoError(t, err)

if podSpec != nil {
for _, container := range podSpec.Containers {
if container.Name == otelContainerName {
@@ -369,52 +370,61 @@ func isSubchartObject(object metav1.Object) bool {
return false
}

// Get a PodSpec from the unstructured object, if possible
// This only works on Deployments, StatefulSets, DaemonSets, Jobs and CronJobs
func GetPodSpec(object unstructured.Unstructured) (*corev1.PodSpec, error) {
podTemplateSpec, err := GetPodTemplateSpec(object)

if err != nil || podTemplateSpec == nil {
return nil, err
}

return &podTemplateSpec.Spec, nil
}

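// GetPodTemplateSpec extracts the PodTemplateSpec from a rendered workload object.
// It supports Deployments, StatefulSets, DaemonSets, Jobs and CronJobs; any other
// kind yields a nil PodTemplateSpec and no error.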
func GetPodTemplateSpec(object unstructured.Unstructured) (*corev1.PodTemplateSpec, error) {
switch object.GetKind() {
case "Deployment":
deployment := &appsv1.Deployment{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &deployment)
if err != nil {
return nil, err
}
return &deployment.Spec.Template.Spec, nil
return &deployment.Spec.Template, nil
case "StatefulSet":
statefulset := &appsv1.StatefulSet{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &statefulset)
if err != nil {
return nil, err
}
return &statefulset.Spec.Template.Spec, nil
return &statefulset.Spec.Template, nil
case "DaemonSet":
daemonset := &appsv1.DaemonSet{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &daemonset)
if err != nil {
return nil, err
}
return &daemonset.Spec.Template.Spec, nil
return &daemonset.Spec.Template, nil
case "Job":
jobs := &batchv1.Job{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &jobs)
job := &batchv1.Job{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &job)
if err != nil {
return nil, err
}
return &jobs.Spec.Template.Spec, nil
return &job.Spec.Template, nil
case "CronJob":
jobs := &batchv1.CronJob{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &jobs)
cronJob := &batchv1.CronJob{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.Object, &cronJob)
if err != nil {
return nil, err
}
return &jobs.Spec.JobTemplate.Spec.Template.Spec, nil
return &cronJob.Spec.JobTemplate.Spec.Template, nil
default:
return nil, nil
}
}

func GetNodeSelector(object unstructured.Unstructured) (map[string]string, error) {
podSpec, err := GetPodSpec(object)

if err != nil {
return nil, err
} else if podSpec != nil {
@@ -434,6 +444,7 @@ func GetNodeSelector(object unstructured.Unstructured) (map[string]string, error
}
func GetAffinity(object unstructured.Unstructured) (*corev1.Affinity, error) {
podSpec, err := GetPodSpec(object)

if err != nil {
return nil, err
} else if podSpec != nil {
@@ -445,6 +456,7 @@ func GetAffinity(object unstructured.Unstructured) (*corev1.Affinity, error) {

func GetTolerations(object unstructured.Unstructured) ([]corev1.Toleration, error) {
podSpec, err := GetPodSpec(object)

if err != nil {
return nil, err
} else if podSpec != nil {
@@ -547,3 +559,65 @@ func TestServiceAccountPullSecrets(t *testing.T) {
})
}
}

func TestCustomPodLabels(t *testing.T) {
t.Parallel()
valuesFilePath := path.Join(testDataDirectory, "custom-podData.yaml")
renderedYamlString := RenderTemplate(
t,
&helm.Options{
ValuesFiles: []string{valuesFilePath},
SetStrValues: map[string]string{
"sumologic.accessId": "accessId",
"sumologic.accessKey": "accessKey",
},
Logger: logger.Discard,
},
chartDirectory,
releaseName,
[]string{},
true,
"--namespace",
defaultNamespace,
)

renderedObjects := UnmarshalMultipleFromYaml[unstructured.Unstructured](t, renderedYamlString)

for _, renderedObject := range renderedObjects {
podTemplateSpec, err := GetPodTemplateSpec(renderedObject)

if err != nil {
t.Logf("Error getting PodTemplateSpec for object %s: %v", renderedObject.GetName(), err)
continue
}

if podTemplateSpec == nil {
t.Logf("PodTemplateSpec is nil for object %s", renderedObject.GetName())
continue
}

labels := podTemplateSpec.Labels
labelValue, ok := labels[customLabelKey]

assert.True(
t,
ok,
"%s should have label %s",
renderedObject.GetName(),
customLabelKey,
)

assert.Equal(
t,
customLabelValue,
labelValue,
"%s should have label %s set to %s, found %s instead",
renderedObject.GetName(),
customLabelKey,
customLabelValue,
labelValue,
)
}
}
2 changes: 2 additions & 0 deletions tests/helm/const.go
@@ -19,6 +19,8 @@ const (
maxHelmReleaseNameLength = 22 // Helm allows up to 53, but for a name longer than 22 some statefulset names will be too long
k8sMaxNameLength = 253 // see https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
k8sMaxLabelLength = 63 // see https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
customLabelKey = "customLabelKey"
customLabelValue = "customLabelValue"
)

var subChartNames []string = []string{
36 changes: 36 additions & 0 deletions tests/helm/testdata/custom-podData.yaml
@@ -0,0 +1,36 @@
sumologic:
metrics:
remoteWriteProxy:
enabled: true

podLabels:
customLabelKey: customLabelValue

kube-prometheus-stack:
kube-state-metrics:
customLabels:
customLabelKey: customLabelValue
prometheus:
enabled: true
prometheusSpec:
podMetadata:
labels:
customLabelKey: customLabelValue
prometheus-node-exporter:
podLabels:
customLabelKey: customLabelValue

opentelemetry-operator:
manager:
podLabels:
customLabelKey: customLabelValue

falco:
enabled: true
podLabels:
customLabelKey: customLabelValue

prometheus-windows-exporter:
enabled: true
podLabels:
customLabelKey: customLabelValue