diff --git a/.changelog/3818.changed.txt b/.changelog/3818.changed.txt new file mode 100644 index 0000000000..21460fde5e --- /dev/null +++ b/.changelog/3818.changed.txt @@ -0,0 +1 @@ +feat: do not send any histogram or summary metric \ No newline at end of file diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md index 7ccdbd9c2c..c2ff259c99 100644 --- a/deploy/helm/sumologic/README.md +++ b/deploy/helm/sumologic/README.md @@ -147,7 +147,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an | `sumologic.metrics.collector.otelcol.config.merge` | Configuration for otelcol metrics collector, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | | `sumologic.metrics.collector.otelcol.config.override` | Configuration for otelcol metrics collector, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} | | `sumologic.metrics.collector.otelcol.targetAllocator.resources` | Resource requests and limits for Metrics Collector Target Allocator. | {} | -| `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` | +| `sumologic.metrics.dropHistogramBuckets` | Drop buckets and quantiles from histogram and summary metrics, leaving only the sum and count components. | `true` | | `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `otlp` | | `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. 
Please contact your Sumo representative for activation details_ | `true` | | `sumologic.traces.spans_per_request` | Maximum number of spans sent in single batch | `100` | diff --git a/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml b/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml index 3df5cd998a..f33c562462 100644 --- a/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml +++ b/deploy/helm/sumologic/conf/metrics/collector/otelcol/config.yaml @@ -57,8 +57,8 @@ processors: metric_statements: - context: metric statements: - - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY {{- end }} receivers: diff --git a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml index 910adc29a0..f72d8c0a07 100644 --- a/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml +++ b/deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml @@ -16,7 +16,7 @@ filter/drop_unnecessary_metrics: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") {{- if .Values.sumologic.metrics.dropHistogramBuckets }} # drop histograms we've extracted sums and counts from, but don't want the full thing - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == 
METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") {{- end }} # Prometheus receiver puts all labels in record-level attributes, and we need them in resource diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml index ec860318a7..0dd0f94474 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/additional_endpoints.output.yaml @@ -44,7 +44,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml index cffd7c17d4..9723ea1b07 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/basic.output.yaml @@ -44,7 +44,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml 
b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml index d21eca4765..9ea3b748ab 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/custom.output.yaml @@ -121,7 +121,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug.output.yaml index 0982f4e5da..84800fcfa6 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug.output.yaml @@ -46,7 +46,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock.output.yaml index 85bb94537c..a90af0b7f4 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock.output.yaml @@ -69,7 +69,8 @@ data: metrics: metric: - 
resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http.output.yaml index b8738cadd4..7ed2166af5 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/debug_with_sumologic_mock_http.output.yaml @@ -146,7 +146,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml b/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml index 2a540b3141..b35590bdd9 100644 --- a/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml +++ b/tests/helm/testdata/goldenfile/metadata_metrics_otc/filtered_app_metrics.output.yaml @@ -68,7 +68,8 @@ data: metrics: metric: - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*") - - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - type == 
METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM + or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket") groupbyattrs: keys: - container diff --git a/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml b/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml index d6d6915fb1..060e343bf0 100644 --- a/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml +++ b/tests/helm/testdata/goldenfile/metrics_collector_otc/basic.output.yaml @@ -115,8 +115,10 @@ spec: metric_statements: - context: metric statements: - - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY receivers: prometheus: config: diff --git a/tests/helm/testdata/goldenfile/metrics_collector_otc/debug.output.yaml b/tests/helm/testdata/goldenfile/metrics_collector_otc/debug.output.yaml index 79717f7786..e0b10d8ff2 100644 --- a/tests/helm/testdata/goldenfile/metrics_collector_otc/debug.output.yaml +++ b/tests/helm/testdata/goldenfile/metrics_collector_otc/debug.output.yaml @@ -117,8 +117,10 @@ spec: metric_statements: - context: metric statements: - - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - - extract_count_metric(true) where IsMatch(name, 
"^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY receivers: prometheus: config: diff --git a/tests/helm/testdata/goldenfile/metrics_collector_otc/kubelet.output.yaml b/tests/helm/testdata/goldenfile/metrics_collector_otc/kubelet.output.yaml index d777b6c750..64291fb19e 100644 --- a/tests/helm/testdata/goldenfile/metrics_collector_otc/kubelet.output.yaml +++ b/tests/helm/testdata/goldenfile/metrics_collector_otc/kubelet.output.yaml @@ -115,8 +115,10 @@ spec: metric_statements: - context: metric statements: - - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") - - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$") + - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY + - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type + == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY receivers: prometheus: config: diff --git a/tests/integration/internal/constants.go b/tests/integration/internal/constants.go index 140f5c34e2..91bbb0ec55 100644 --- a/tests/integration/internal/constants.go +++ b/tests/integration/internal/constants.go @@ -130,12 +130,10 @@ var ( "kubelet_runtime_operations_duration_seconds_sum", } KubeSchedulerMetrics = []string{ - "scheduler_scheduling_algorithm_duration_seconds_count", // 
not used by any App - "scheduler_scheduling_algorithm_duration_seconds_sum", // used by Kubernetes - Control Plane - "scheduler_scheduling_algorithm_duration_seconds_bucket", // not used by any App - "scheduler_framework_extension_point_duration_seconds_bucket", // not used by any App, probably will be used by Kubernetes - Control Plane - "scheduler_framework_extension_point_duration_seconds_count", // not used by any App, probably will be used by Kubernetes - Control Plane - "scheduler_framework_extension_point_duration_seconds_sum", // not used by any App, probably will be used by Kubernetes - Control Plane + "scheduler_scheduling_algorithm_duration_seconds_count", // not used by any App + "scheduler_scheduling_algorithm_duration_seconds_sum", // used by Kubernetes - Control Plane + "scheduler_framework_extension_point_duration_seconds_count", // not used by any App, probably will be used by Kubernetes - Control Plane + "scheduler_framework_extension_point_duration_seconds_sum", // not used by any App, probably will be used by Kubernetes - Control Plane } KubeApiServerMetrics = []string{ "apiserver_request_total", // used by Kubernetes - Control Plane @@ -146,10 +144,8 @@ var ( "etcd_mvcc_db_total_size_in_bytes", // not used by any App "etcd_debugging_store_expires_total", "etcd_debugging_store_watchers", - "etcd_disk_backend_commit_duration_seconds_bucket", "etcd_disk_backend_commit_duration_seconds_count", "etcd_disk_backend_commit_duration_seconds_sum", - "etcd_disk_wal_fsync_duration_seconds_bucket", "etcd_disk_wal_fsync_duration_seconds_count", "etcd_disk_wal_fsync_duration_seconds_sum", "etcd_grpc_proxy_cache_hits_total", @@ -237,12 +233,10 @@ var ( "otelcol_otelsvc_k8s_pod_table_size", "otelcol_otelsvc_k8s_pod_updated", "otelcol_processor_accepted_metric_points", - "otelcol_processor_batch_batch_send_size_bucket", "otelcol_processor_batch_batch_send_size_count", "otelcol_processor_batch_batch_send_size_sum", "otelcol_processor_batch_timeout_trigger_send", 
"otelcol_processor_dropped_metric_points", - "otelcol_processor_groupbyattrs_metric_groups_bucket", "otelcol_processor_groupbyattrs_metric_groups_count", "otelcol_processor_groupbyattrs_metric_groups_sum", "otelcol_processor_groupbyattrs_num_non_grouped_metrics", @@ -262,7 +256,6 @@ var ( "otelcol_processor_refused_log_records", "otelcol_processor_dropped_log_records", "otelcol_processor_groupbyattrs_num_grouped_logs", - "otelcol_processor_groupbyattrs_log_groups_bucket", "otelcol_processor_groupbyattrs_log_groups_count", "otelcol_processor_groupbyattrs_log_groups_sum", "otelcol_fileconsumer_reading_files", @@ -300,7 +293,6 @@ var ( "prometheus_remote_storage_samples_pending", "prometheus_remote_storage_samples_retried_total", "prometheus_remote_storage_samples_total", - "prometheus_remote_storage_sent_batch_duration_seconds_bucket", "prometheus_remote_storage_sent_batch_duration_seconds_count", "prometheus_remote_storage_sent_batch_duration_seconds_sum", "prometheus_remote_storage_shard_capacity", @@ -322,7 +314,6 @@ var ( "otelcol_http_server_response_content_length", "otelcol_http_server_request_content_length", "otelcol_http_server_duration_count", - "otelcol_http_server_duration_bucket", "otelcol_processor_batch_batch_size_trigger_send", "otelcol_processor_filter_datapoints_filtered", "otelcol_otelsvc_k8s_ip_lookup_miss", @@ -338,10 +329,8 @@ var ( // scheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23 "scheduler_e2e_scheduling_duration_seconds_count", "scheduler_e2e_scheduling_duration_seconds_sum", - "scheduler_e2e_scheduling_duration_seconds_bucket", "scheduler_scheduling_attempt_duration_seconds_count", "scheduler_scheduling_attempt_duration_seconds_sum", - "scheduler_scheduling_attempt_duration_seconds_bucket", "cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile", "cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile", "target_info", @@ -376,7 +365,6 @@ var ( }, after: []string{ 
"coredns_proxy_request_duration_seconds_count", - "coredns_proxy_request_duration_seconds_bucket", "coredns_proxy_request_duration_seconds_sum", }, },