From 947d234ca6ac3564539ce651b254464f3445e5c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?=
Date: Mon, 18 Sep 2023 19:09:41 +0200
Subject: [PATCH] chore(metrics): move unnecessary filter to metadata layer

---
 .../conf/metrics/collector/otelcol/config.yaml | 11 -----------
 .../helm/sumologic/conf/metrics/otelcol/pipeline.yaml | 1 +
 .../sumologic/conf/metrics/otelcol/processors.yaml | 11 +++++++++++
 .../additional_endpoints.output.yaml | 7 +++++++
 .../goldenfile/metadata_metrics_otc/basic.output.yaml | 7 +++++++
 .../metadata_metrics_otc/custom.output.yaml | 7 +++++++
 .../filtered_app_metrics.output.yaml | 7 +++++++
 .../metrics_collector_otc/basic.output.yaml | 9 ---------
 .../metrics_collector_otc/custom.output.yaml | 7 -------
 9 files changed, 40 insertions(+), 27 deletions(-)

diff --git a/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml b/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml
index 00c81dd3ec..eadb184e3c 100644
--- a/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml
+++ b/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml
@@ -50,16 +50,6 @@ processors:
       - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
       - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
 {{- end }}
-  filter/drop_unnecessary_metrics:
-    error_mode: ignore
-    metrics:
-      metric:
-        # we let the metrics from annotations ("kubernetes-pods") through as they are
-        - resource.attributes["service.name"] != "pod-annotations" and IsMatch(name, "scrape_.*")
-{{- if .Values.sumologic.metrics.dropHistogramBuckets }}
-        # drop histograms we've extracted sums and counts from, but don't want the full thing
-        - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
-{{- end }}

 receivers:
   prometheus:
@@ -216,7 +206,6 @@ service:
 {{- if .Values.sumologic.metrics.dropHistogramBuckets }}
         - transform/extract_sum_count_from_histograms
 {{- end }}
-        - filter/drop_unnecessary_metrics
         - transform/drop_unnecessary_attributes
       receivers: [prometheus]
diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml
index 51e1dc370d..65cf8b346b 100644
--- a/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml
+++ b/deploy/helm/sumologic/conf/metrics/otelcol/pipeline.yaml
@@ -27,6 +27,7 @@ processors:
   - transform/set_name
   - groupbyattrs/group_by_name
   - transform/remove_name
+  - filter/drop_unnecessary_metrics
 {{- if .Values.sumologic.metrics.enableDefaultFilters }}
   - filter/app_metrics
 {{- end }}
diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
index cf5ae4ea5e..e5e81dd25a 100644
--- a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
+++ b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
@@ -8,6 +8,17 @@ batch:
   ## Time duration after which a batch will be sent regardless of size
   timeout: 1s

+filter/drop_unnecessary_metrics:
+  error_mode: ignore
+  metrics:
+    metric:
+      # we let the metrics from annotations ("kubernetes-pods") through as they are
+      - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
+{{- if .Values.sumologic.metrics.dropHistogramBuckets }}
+      # drop histograms we've extracted sums and counts from, but don't want the full thing
+      - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+{{- end }}
+
 # Prometheus receiver puts all labels in record-level attributes, and we need them in resource
 groupbyattrs:
   keys:
diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml
index a5abb55f17..d4ed573dee 100644
--- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml
+++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml
@@ -107,6 +107,12 @@ data:
         send_batch_max_size: 2048
         send_batch_size: 1024
         timeout: 1s
+      filter/drop_unnecessary_metrics:
+        error_mode: ignore
+        metrics:
+          metric:
+          - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
+          - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
       groupbyattrs:
         keys:
         - container
@@ -314,6 +320,7 @@ data:
         - transform/set_name
         - groupbyattrs/group_by_name
         - transform/remove_name
+        - filter/drop_unnecessary_metrics
         - batch
         - transform/prepare_routing
         - routing
diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml
index 3b9623f528..143cfdd23c 100644
--- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml
+++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml
@@ -107,6 +107,12 @@ data:
         send_batch_max_size: 2048
         send_batch_size: 1024
         timeout: 1s
+      filter/drop_unnecessary_metrics:
+        error_mode: ignore
+        metrics:
+          metric:
+          - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
+          - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
       groupbyattrs:
         keys:
         - container
@@ -312,6 +318,7 @@ data:
         - transform/set_name
         - groupbyattrs/group_by_name
         - transform/remove_name
+        - filter/drop_unnecessary_metrics
         - batch
         - transform/prepare_routing
         - routing
diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml
index a33742ff3f..ac705f8edf 100644
--- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml
+++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml
@@ -37,6 +37,12 @@ data:
         send_batch_max_size: 2048
         send_batch_size: 1024
         timeout: 1s
+      filter/drop_unnecessary_metrics:
+        error_mode: ignore
+        metrics:
+          metric:
+          - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
+          - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
       groupbyattrs:
         keys:
         - container
@@ -186,6 +192,7 @@ data:
         - transform/set_name
         - groupbyattrs/group_by_name
         - transform/remove_name
+        - filter/drop_unnecessary_metrics
         - batch
         - transform/drop_routing_attribute
       receivers:
diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml
index b0f266c6f4..74af2b456d 100644
--- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml
+++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml
@@ -131,6 +131,12 @@ data:
          - IsMatch(name, "activemq_.*") and not IsMatch(name, "(?:activemq_(topic_.*|queue_.*|.*_QueueSize|broker_(AverageMessageSize|CurrentConnectionsCount|MemoryLimit|StoreLimit|TempLimit|TotalConnectionsCount|TotalConsumerCount|TotalDequeueCount|TotalEnqueueCount|TotalMessageCount|TotalProducerCount|UptimeMillis)|jvm_memory_(HeapMemoryUsage_max|HeapMemoryUsage_used|NonHeapMemoryUsage_used)|jvm_runtime_Uptime|OperatingSystem_(FreePhysicalMemorySize|SystemCpuLoad|TotalPhysicalMemorySize)))")
          - IsMatch(name, "couchbase_.*") and not IsMatch(name, "(?:couchbase_(node_.*|bucket_(ep_.*|vb_.*|delete_.*|cmd.*|bytes_.*|item_count|curr_connections|ops_per_sec|disk_write_queue|mem_.*|cas_hits|ops|curr_items|cpu_utilization_rate|swap_used|disk_used|rest_requests|hibernated_waked|xdc_ops)))")
          - IsMatch(name, "squid_.*") and not IsMatch(name, "(?:squid_(uptime|cache(Ip(Entries|Requests|Hits)|Fqdn(Entries|Requests|Misses|NegativeHits)|Dns(Requests|Replies|SvcTime5)|Sys(PageFaults|NumReads)|Current(FileDescrCnt|UnusedFDescrCnt|ResFileDescrCnt)|Server(Requests|InKb|OutKb)|Http(AllSvcTime5|Errors|InKb|OutKb|AllSvcTime1)|Mem(MaxSize|Usage)|NumObjCount|CpuTime|MaxResSize|ProtoClientHttpRequests|Clients)))")
+      filter/drop_unnecessary_metrics:
+        error_mode: ignore
+        metrics:
+          metric:
+          - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
+          - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
       groupbyattrs:
         keys:
         - container
@@ -336,6 +342,7 @@ data:
         - transform/set_name
         - groupbyattrs/group_by_name
         - transform/remove_name
+        - filter/drop_unnecessary_metrics
         - filter/app_metrics
         - batch
         - transform/prepare_routing
diff --git a/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml b/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml
index bcebf2c2ad..9657623bfe 100644
--- a/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml
+++ b/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml
@@ -111,14 +111,6 @@ spec:
           statements:
           - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
          - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
-      filter/drop_unnecessary_metrics:
-        error_mode: ignore
-        metrics:
-          metric:
-            # we let the metrics from annotations ("kubernetes-pods") through as they are
-            - resource.attributes["service.name"] != "pod-annotations" and IsMatch(name, "scrape_.*")
-            # drop histograms we've extracted sums and counts from, but don't want the full thing
-            - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")

     receivers:
       prometheus:
@@ -264,6 +256,5 @@ spec:
       processors:
       - batch
       - transform/extract_sum_count_from_histograms
-      - filter/drop_unnecessary_metrics
       - transform/drop_unnecessary_attributes
       receivers: [prometheus]
diff --git a/tests/helm/testdata/goldenfile/metrics_collector_otc/custom.output.yaml b/tests/helm/testdata/goldenfile/metrics_collector_otc/custom.output.yaml
index bc53d63110..7519b673bf 100644
--- a/tests/helm/testdata/goldenfile/metrics_collector_otc/custom.output.yaml
+++ b/tests/helm/testdata/goldenfile/metrics_collector_otc/custom.output.yaml
@@ -123,12 +123,6 @@ spec:
             # we drop them to make the rest of our pipeline easier to reason about
             # after the collector and metadata are merged, consider using them instead of k8sattributes processor
             - delete_matching_keys(attributes, "k8s.*")
-      filter/drop_unnecessary_metrics:
-        error_mode: ignore
-        metrics:
-          metric:
-            # we let the metrics from annotations ("kubernetes-pods") through as they are
-            - resource.attributes["service.name"] != "pod-annotations" and IsMatch(name, "scrape_.*")

     receivers:
       prometheus:
@@ -156,6 +150,5 @@ spec:
       exporters: [otlphttp]
       processors:
       - batch
-      - filter/drop_unnecessary_metrics
       - transform/drop_unnecessary_attributes
       receivers: [prometheus]