From bf40597db1727e3733d1abaeec6e8e2fcfe7d98d Mon Sep 17 00:00:00 2001
From: Raj Nishtala
Date: Wed, 27 Nov 2024 18:51:55 -0500
Subject: [PATCH] Remove the sumologic exporter after deprecation. The
 upstream sumologic exporter will be used

---
 .changelog/1714.changed.txt                   |    1 +
 pkg/exporter/sumologicexporter/Makefile       |    1 -
 pkg/exporter/sumologicexporter/README.md      |  208 ---
 pkg/exporter/sumologicexporter/config.go      |  209 ---
 pkg/exporter/sumologicexporter/config_test.go |  111 --
 .../sumologicexporter/deduplicate_errors.go   |   60 -
 .../deduplicate_errors_test.go                |   69 -
 pkg/exporter/sumologicexporter/exporter.go    |  493 ------
 .../sumologicexporter/exporter_test.go        |  660 ---------
 pkg/exporter/sumologicexporter/factory.go     |  101 --
 .../sumologicexporter/factory_test.go         |   60 -
 pkg/exporter/sumologicexporter/fields.go      |   99 --
 pkg/exporter/sumologicexporter/fields_test.go |   99 --
 pkg/exporter/sumologicexporter/filter.go      |   76 -
 pkg/exporter/sumologicexporter/filter_test.go |   66 -
 pkg/exporter/sumologicexporter/go.mod         |   86 --
 pkg/exporter/sumologicexporter/go.sum         |  280 ----
 .../internal/observability/observability.go   |  137 --
 .../observability/observability_test.go       |  188 ---
 pkg/exporter/sumologicexporter/otlp.go        |  160 --
 pkg/exporter/sumologicexporter/otlp_test.go   |  181 ---
 .../sumologicexporter/prometheus_formatter.go |  422 ------
 .../prometheus_formatter_test.go              |  227 ---
 pkg/exporter/sumologicexporter/sender.go      |  779 ----------
 pkg/exporter/sumologicexporter/sender_test.go | 1316 -----------------
 .../sumologicexporter/test_data_test.go       |  322 ----
 26 files changed, 1 insertion(+), 6410 deletions(-)
 create mode 100644 .changelog/1714.changed.txt
 delete mode 100644 pkg/exporter/sumologicexporter/Makefile
 delete mode 100644 pkg/exporter/sumologicexporter/README.md
 delete mode 100644 pkg/exporter/sumologicexporter/config.go
 delete mode 100644 pkg/exporter/sumologicexporter/config_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/deduplicate_errors.go
 delete mode 100644 pkg/exporter/sumologicexporter/deduplicate_errors_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/exporter.go
 delete mode 100644 pkg/exporter/sumologicexporter/exporter_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/factory.go
 delete mode 100644 pkg/exporter/sumologicexporter/factory_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/fields.go
 delete mode 100644 pkg/exporter/sumologicexporter/fields_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/filter.go
 delete mode 100644 pkg/exporter/sumologicexporter/filter_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/go.mod
 delete mode 100644 pkg/exporter/sumologicexporter/go.sum
 delete mode 100644 pkg/exporter/sumologicexporter/internal/observability/observability.go
 delete mode 100644 pkg/exporter/sumologicexporter/internal/observability/observability_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/otlp.go
 delete mode 100644 pkg/exporter/sumologicexporter/otlp_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/prometheus_formatter.go
 delete mode 100644 pkg/exporter/sumologicexporter/prometheus_formatter_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/sender.go
 delete mode 100644 pkg/exporter/sumologicexporter/sender_test.go
 delete mode 100644 pkg/exporter/sumologicexporter/test_data_test.go

diff --git a/.changelog/1714.changed.txt b/.changelog/1714.changed.txt
new file mode 100644
index 0000000000..cf57e1ec61
--- /dev/null
+++ b/.changelog/1714.changed.txt
@@ -0,0 +1 @@
+chore: Upgrade otel core to v114
\ No newline at end of file
diff --git a/pkg/exporter/sumologicexporter/Makefile b/pkg/exporter/sumologicexporter/Makefile
deleted file mode 100644
index ded7a36092..0000000000
--- a/pkg/exporter/sumologicexporter/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-include ../../Makefile.Common
diff --git a/pkg/exporter/sumologicexporter/README.md b/pkg/exporter/sumologicexporter/README.md
deleted file mode 100644
index cbe4bfe223..0000000000
--- a/pkg/exporter/sumologicexporter/README.md
+++ /dev/null
@@ -1,208 +0,0 @@
-# Sumo Logic Exporter
-
-**Stability level**: Deprecated
-
-This extension is deprecated in favor of the [Sumo Logic exporter][sumologic_exporter_docs] that lives in the [OpenTelemetry Collector Contrib][contrib_repo] repository.
-
-[sumologic_exporter_docs]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/sumologicexporter/README.md
-[contrib_repo]: https://github.com/open-telemetry/opentelemetry-collector-contrib/
-
-This exporter supports sending logs and metrics data to [Sumo Logic](https://www.sumologic.com/).
-
-We strongly recommend using this exporter with [sumologicextension](../../extension/sumologicextension/README.md).
-
-Configuration is specified via YAML in the following structure:
-
-```yaml
-exporters:
-  # ...
-  sumologic:
-    # unique URL generated for your HTTP Source; this is the address to send data to
-    # deprecated, please use sumologicextension to manage your endpoints
-    # if sumologicextension is not being used, the endpoint is required
-    endpoint:
-    # Compression encoding format, empty string means no compression, default = gzip
-    # DEPRECATION NOTICE: compress_encoding (reason: use compression)
-    compress_encoding: {gzip, deflate, ""}
-    # Compression encoding format, empty string means no compression, default = gzip
-    compression: {gzip, zstd, deflate, ""}
-    # max HTTP request body size in bytes before compression (if applied),
-    # NOTE: this limit does not apply to data sent in otlp format,
-    # to limit size of otlp requests, please use the batch processor:
-    # https://github.com/open-telemetry/opentelemetry-collector/tree/v0.103.0/processor/batchprocessor
-    # default = 1_048_576 (1MB)
-    max_request_body_size:
-
-    # format to use when sending logs to Sumo Logic, default = otlp,
-    # NOTE: only `otlp` is supported when used with sumologicextension
-    log_format: {json, text, otlp}
-
-    # format to use when sending metrics to Sumo Logic, default = otlp,
-    # NOTE: only `otlp` is supported when used with sumologicextension
-    metric_format: {otlp, prometheus}
-
-    # Decompose OTLP Histograms into individual metrics, similar to how they're represented in Prometheus format.
-    # The Sumo OTLP source currently doesn't support Histograms, and they are quietly dropped. This option produces
-    # metrics similar to when metric_format is set to prometheus.
-    # default = false
-    decompose_otlp_histograms: {true, false}
-
-    # format to use when sending traces to Sumo Logic,
-    # currently only otlp is supported
-    trace_format: {otlp}
-
-    # timeout is the timeout for every attempt to send data to the backend,
-    # maximum connection timeout is 55s, default = 30s
-    timeout:
-
-    # defines client name used for Sumo Logic statistics
-    # default = "otelcol"
-    client:
-
-    # instructs sumologicexporter to use an endpoint automatically generated by
-    # sumologicextension;
-    # to use a direct endpoint, set `auth` to `null` and set the endpoint configuration
-    # option;
-    # see sumologicextension documentation for details
-    # default = sumologic
-    auth:
-      authenticator:
-
-    # for below described queueing and retry related configuration please refer to:
-    # https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md#configuration
-
-    retry_on_failure:
-      # default = true
-      enabled: {true, false}
-      # time to wait after the first failure before retrying;
-      # ignored if enabled is false, default = 5s
-      initial_interval:
-      # is the upper bound on backoff; ignored if enabled is false, default = 30s
-      max_interval:
-      # is the maximum amount of time spent trying to send a batch;
-      # ignored if enabled is false, default = 120s
-      max_elapsed_time:
-
-    sending_queue:
-      # default = false
-      enabled: {true, false}
-      # number of consumers that dequeue batches; ignored if enabled is false,
-      # default = 10
-      num_consumers:
-      # when set, enables persistence and uses the component specified as a storage extension for the persistent queue
-      # make sure to configure and add a `file_storage` extension in `service.extensions`.
-      # default = None
-      storage:
-      # maximum number of batches kept in memory before dropping data;
-      # ignored if enabled is false, default = 1000
-      #
-      # user should calculate this as num_seconds * requests_per_second where:
-      # num_seconds is the number of seconds to buffer in case of a backend outage,
-      # requests_per_second is the average number of requests per second.
-      queue_size:
-
-    # defines if sticky session support is enabled
-    # more details about sticky sessions for ALB can be found here:
-    # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/sticky-sessions.html
-    # default = false
-    sticky_session_enabled: {true, false}
-```
-
-## Metrics
-
-The Sumo Logic Exporter exposes the following metrics:
-
-- `otelcol_exporter_requests_bytes` (`counter`) - total size of HTTP requests (in bytes)
-- `otelcol_exporter_requests_duration` (`counter`) - duration of HTTP requests (in milliseconds)
-- `otelcol_exporter_requests_records` (`counter`) - total number of records sent in HTTP requests
-- `otelcol_exporter_requests_sent` (`counter`) - number of HTTP requests
-
-All of the above metrics have the following dimensions:
-
-- `endpoint` - endpoint address
-- `exporter` - exporter name
-- `pipeline` - pipeline name (`logs`, `metrics` or `traces`)
-- `status_code` - HTTP response status code (`0` in case of error)
-
-## Example Configuration
-
-### Example with sumologicextension
-
-```yaml
-extensions:
-  sumologic:
-    installation_token:
-    collector_name: my_collector
-
-receivers:
-  hostmetrics:
-    collection_interval: 30s
-    scrapers:
-      load:
-
-exporters:
-  sumologic:
-
-processors:
-  source:
-    source_category: "custom category"
-    source_name: "custom name"
-    source_host: "%{k8s.pod.name}"
-
-service:
-  extensions: [sumologic]
-  pipelines:
-    metrics:
-      receivers: [hostmetrics]
-      processors: [source]
-      exporters: [sumologic]
-```
-
-### Example without sumologicextension
-
-```yaml
-exporters:
-  sumologic:
-    endpoint: http://localhost:3000
-    compress_encoding: "gzip"
-    max_request_body_size: "1_048_576" # 1MB
-    log_format: "text"
-    metric_format: "prometheus"
-processors:
-  source:
-    source_category: "custom category"
-    source_name: "custom name"
-    source_host: "custom host"
-```
-
-### Example with persistent queue
-
-```yaml
-exporters:
-  sumologic:
-    endpoint: http://localhost:3000
-    metric_format: prometheus
-    sending_queue:
-      enabled: true
-      storage: file_storage
-
-extensions:
-  file_storage:
-    directory: .
-
-receivers:
-  hostmetrics:
-    collection_interval: 3s
-    scrapers:
-      load:
-
-service:
-  extensions:
-    - file_storage
-  pipelines:
-    metrics:
-      exporters:
-        - sumologic
-      receivers:
-        - hostmetrics
-```
diff --git a/pkg/exporter/sumologicexporter/config.go b/pkg/exporter/sumologicexporter/config.go
deleted file mode 100644
index d75d3dcfe2..0000000000
--- a/pkg/exporter/sumologicexporter/config.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2020, OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sumologicexporter
-
-import (
-    "errors"
-    "fmt"
-    "net/url"
-    "time"
-
-    "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/config/configauth"
-    "go.opentelemetry.io/collector/config/configcompression"
-    "go.opentelemetry.io/collector/config/confighttp"
-    "go.opentelemetry.io/collector/config/configretry"
-    "go.opentelemetry.io/collector/exporter/exporterhelper"
-
-    "github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension"
-)
-
-// Config defines configuration for Sumo Logic exporter.
-type Config struct {
-    confighttp.ClientConfig      `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
-    exporterhelper.QueueSettings `mapstructure:"sending_queue"`
-    configretry.BackOffConfig    `mapstructure:"retry_on_failure"`
-
-    // Compression encoding format, either empty string, gzip or deflate (default gzip).
-    // Empty string means no compression.
-    // NOTE: CompressEncoding is deprecated and will be removed in an upcoming release.
-    CompressEncoding configcompression.Type `mapstructure:"compress_encoding"`
-    // Max HTTP request body size in bytes before compression (if applied).
-    // By default 1MB is recommended.
-    MaxRequestBodySize int `mapstructure:"max_request_body_size"`
-
-    // Logs related configuration
-    // Format to post logs into Sumo. (default json)
-    // * text - Logs will appear in Sumo Logic in text format.
-    // * json - Logs will appear in Sumo Logic in json format.
-    // * otlp - Logs will be sent in otlp format and will appear in Sumo Logic in text format.
-    LogFormat LogFormatType `mapstructure:"log_format"`
-
-    // Metrics related configuration
-    // The format of metrics you will be sending, either otlp or prometheus (default is otlp)
-    MetricFormat MetricFormatType `mapstructure:"metric_format"`
-
-    // Decompose OTLP Histograms into individual metrics, similar to how they're represented in Prometheus format
-    DecomposeOtlpHistograms bool `mapstructure:"decompose_otlp_histograms"`
-
-    // Traces related configuration
-    // The format of traces you will be sending, currently only otlp format is supported
-    TraceFormat TraceFormatType `mapstructure:"trace_format"`
-
-    // Sumo specific options
-    // Name of the client
-    Client string `mapstructure:"client"`
-
-    // StickySessionEnabled defines if sticky session support is enabled.
-    // By default this is false.
-    StickySessionEnabled bool `mapstructure:"sticky_session_enabled"`
-}
-
-// CreateDefaultClientConfig returns default http client settings
-func CreateDefaultClientConfig() confighttp.ClientConfig {
-    return confighttp.ClientConfig{
-        Timeout:     defaultTimeout,
-        Compression: DefaultCompressEncoding,
-        Auth: &configauth.Authentication{
-            AuthenticatorID: component.NewID(sumologicextension.NewFactory().Type()),
-        },
-    }
-}
-
-func (cfg *Config) Validate() error {
-
-    switch cfg.CompressEncoding {
-    case configcompression.TypeGzip:
-    case configcompression.TypeDeflate:
-    case NoCompression:
-
-    default:
-        return fmt.Errorf("invalid compression encoding type: %v", cfg.CompressEncoding)
-    }
-
-    switch cfg.ClientConfig.Compression {
-    case configcompression.TypeGzip:
-    case configcompression.TypeDeflate:
-    case configcompression.TypeZstd:
-    case NoCompression:
-
-    default:
-        return fmt.Errorf("invalid compression encoding type: %v", cfg.ClientConfig.Compression)
-    }
-
-    if cfg.CompressEncoding != NoCompression && cfg.ClientConfig.Compression != DefaultCompressEncoding {
-        return fmt.Errorf("compress_encoding is deprecated and should not be used when compression is set to a non-default value")
-    }
-
-    switch cfg.LogFormat {
-    case OTLPLogFormat:
-    case JSONFormat:
-    case TextFormat:
-    default:
-        return fmt.Errorf("unexpected log format: %s", cfg.LogFormat)
-    }
-
-    switch cfg.MetricFormat {
-    case OTLPMetricFormat:
-    case PrometheusFormat:
-    case RemovedGraphiteFormat:
-        return fmt.Errorf("support for the graphite metric format was removed, please use prometheus or otlp instead")
-    case RemovedCarbon2Format:
-        return fmt.Errorf("support for the carbon2 metric format was removed, please use prometheus or otlp instead")
-    default:
-        return fmt.Errorf("unexpected metric format: %s", cfg.MetricFormat)
-    }
-
-    switch cfg.TraceFormat {
-    case OTLPTraceFormat:
-    default:
-        return fmt.Errorf("unexpected trace format: %s", cfg.TraceFormat)
-    }
-
-    if len(cfg.ClientConfig.Endpoint) == 0 && cfg.ClientConfig.Auth == nil {
-        return errors.New("no endpoint and no auth extension specified")
-    }
-
-    if _, err := url.Parse(cfg.ClientConfig.Endpoint); err != nil {
-        return fmt.Errorf("failed parsing endpoint URL: %s; err: %w",
-            cfg.ClientConfig.Endpoint, err,
-        )
-    }
-
-    if err := cfg.QueueSettings.Validate(); err != nil {
-        return fmt.Errorf("queue settings has invalid configuration: %w", err)
-    }
-
-    return nil
-}
-
-// LogFormatType represents log_format
-type LogFormatType string
-
-// MetricFormatType represents metric_format
-type MetricFormatType string
-
-// TraceFormatType represents trace_format
-type TraceFormatType string
-
-// PipelineType represents type of the pipeline
-type PipelineType string
-
-const (
-    // TextFormat represents log_format: text
-    TextFormat LogFormatType = "text"
-    // JSONFormat represents log_format: json
-    JSONFormat LogFormatType = "json"
-    // OTLPLogFormat represents log_format: otlp
-    OTLPLogFormat LogFormatType = "otlp"
-    // RemovedGraphiteFormat represents the no longer supported graphite metric format
-    RemovedGraphiteFormat MetricFormatType = "graphite"
-    // RemovedCarbon2Format represents the no longer supported carbon2 metric format
-    RemovedCarbon2Format MetricFormatType = "carbon2"
-    // PrometheusFormat represents metric_format: prometheus
-    PrometheusFormat MetricFormatType = "prometheus"
-    // OTLPMetricFormat represents metric_format: otlp
-    OTLPMetricFormat MetricFormatType = "otlp"
-    // OTLPTraceFormat represents trace_format: otlp
-    OTLPTraceFormat TraceFormatType = "otlp"
-    // NoCompression represents disabled compression
-    NoCompression configcompression.Type = ""
-    // MetricsPipeline represents metrics pipeline
-    MetricsPipeline PipelineType = "metrics"
-    // LogsPipeline represents logs pipeline
-    LogsPipeline PipelineType = "logs"
-    // TracesPipeline represents traces pipeline
-    TracesPipeline PipelineType = "traces"
-    // defaultTimeout defines the default request timeout
-    defaultTimeout time.Duration = 30 * time.Second
-    // DefaultCompress defines default Compress
-    DefaultCompress bool = true
-    // DefaultCompressEncoding defines default CompressEncoding
-    DefaultCompressEncoding configcompression.Type = "gzip"
-    // DefaultMaxRequestBodySize defines default MaxRequestBodySize in bytes
-    DefaultMaxRequestBodySize int = 1 * 1024 * 1024
-    // DefaultLogFormat defines default LogFormat
-    DefaultLogFormat LogFormatType = OTLPLogFormat
-    // DefaultMetricFormat defines default MetricFormat
-    DefaultMetricFormat MetricFormatType = OTLPMetricFormat
-    // DefaultClient defines default Client
-    DefaultClient string = "otelcol"
-    // DefaultLogKey defines default LogKey value
-    DefaultLogKey string = "log"
-    // DefaultDropRoutingAttribute defines default DropRoutingAttribute
-    DefaultDropRoutingAttribute string = ""
-    // DefaultStickySessionEnabled defines default StickySessionEnabled value
-    DefaultStickySessionEnabled bool = false
-)
diff --git a/pkg/exporter/sumologicexporter/config_test.go b/pkg/exporter/sumologicexporter/config_test.go
deleted file mode 100644
index 3ae10f3f1a..0000000000
--- a/pkg/exporter/sumologicexporter/config_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package sumologicexporter
-
-import (
-    "errors"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-    "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/config/confighttp"
-)
-
-func TestInitExporterInvalidConfiguration(t *testing.T) {
-    testcases := []struct {
-        name          string
-        cfg           *Config
-        expectedError error
-    }{
-        {
-            name:          "unexpected log format",
-            expectedError: errors.New("unexpected log format: test_format"),
-            cfg: &Config{
-                LogFormat:    "test_format",
-                MetricFormat: "otlp",
-                TraceFormat:  "otlp",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:  defaultTimeout,
-                    Endpoint: "test_endpoint",
-                },
-            },
-        },
-        {
-            name:          "unexpected metric format",
-            expectedError: errors.New("unexpected metric format: test_format"),
-            cfg: &Config{
-                LogFormat:    "json",
-                MetricFormat: "test_format",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:     defaultTimeout,
-                    Endpoint:    "test_endpoint",
-                    Compression: "gzip",
-                },
-            },
-        },
-        {
-            name:          "unsupported Carbon2 metrics format",
-            expectedError: errors.New("support for the carbon2 metric format was removed, please use prometheus or otlp instead"),
-            cfg: &Config{
-                LogFormat:    "json",
-                MetricFormat: "carbon2",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:     defaultTimeout,
-                    Endpoint:    "test_endpoint",
-                    Compression: "gzip",
-                },
-            },
-        },
-        {
-            name:          "unsupported Graphite metrics format",
-            expectedError: errors.New("support for the graphite metric format was removed, please use prometheus or otlp instead"),
-            cfg: &Config{
-                LogFormat:    "json",
-                MetricFormat: "graphite",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:     defaultTimeout,
-                    Endpoint:    "test_endpoint",
-                    Compression: "gzip",
-                },
-            },
-        },
-        {
-            name:          "unexpected trace format",
-            expectedError: errors.New("unexpected trace format: text"),
-            cfg: &Config{
-                LogFormat:    "json",
-                MetricFormat: "otlp",
-                TraceFormat:  "text",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:     defaultTimeout,
-                    Endpoint:    "test_endpoint",
-                    Compression: "gzip",
-                },
-            },
-        },
-        {
-            name:          "no endpoint and no auth extension specified",
-            expectedError: errors.New("no endpoint and no auth extension specified"),
-            cfg: &Config{
-                LogFormat:    "json",
-                MetricFormat: "otlp",
-                TraceFormat:  "otlp",
-                ClientConfig: confighttp.ClientConfig{
-                    Timeout:     defaultTimeout,
-                    Compression: "gzip",
-                },
-            },
-        },
-    }
-
-    for _, tc := range testcases {
-        tc := tc
-        t.Run(tc.name, func(t *testing.T) {
-            err := component.ValidateConfig(tc.cfg)
-
-            if tc.expectedError != nil {
-                assert.EqualError(t, err, tc.expectedError.Error())
-            } else {
-                assert.NoError(t, err)
-            }
-        })
-    }
-}
diff --git a/pkg/exporter/sumologicexporter/deduplicate_errors.go b/pkg/exporter/sumologicexporter/deduplicate_errors.go
deleted file mode 100644
index b1417678ad..0000000000
--- a/pkg/exporter/sumologicexporter/deduplicate_errors.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 Sumo Logic, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sumologicexporter
-
-import "fmt"
-
-// deduplicateErrors replaces duplicate instances of the same error in a slice
-// with a single error containing the number of times it occurred added as a suffix.
-// For example, three occurrences of "error: 502 Bad Gateway"
-// are replaced with a single instance of "error: 502 Bad Gateway (x3)".
-func deduplicateErrors(errs []error) []error {
-    if len(errs) < 2 {
-        return errs
-    }
-
-    errorsWithCounts := []errorWithCount{}
-    for _, err := range errs {
-        found := false
-        for i := range errorsWithCounts {
-            if errorsWithCounts[i].err.Error() == err.Error() {
-                found = true
-                errorsWithCounts[i].count += 1
-                break
-            }
-        }
-        if !found {
-            errorsWithCounts = append(errorsWithCounts, errorWithCount{
-                err:   err,
-                count: 1,
-            })
-        }
-    }
-
-    var uniqueErrors []error
-    for _, errorWithCount := range errorsWithCounts {
-        if errorWithCount.count == 1 {
-            uniqueErrors = append(uniqueErrors, errorWithCount.err)
-        } else {
-            uniqueErrors = append(uniqueErrors, fmt.Errorf("%s (x%d)", errorWithCount.err, errorWithCount.count))
-        }
-    }
-    return uniqueErrors
-}
-
-type errorWithCount struct {
-    err   error
-    count int
-}
diff --git a/pkg/exporter/sumologicexporter/deduplicate_errors_test.go b/pkg/exporter/sumologicexporter/deduplicate_errors_test.go
deleted file mode 100644
index 615bdaf5a8..0000000000
--- a/pkg/exporter/sumologicexporter/deduplicate_errors_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2022 Sumo Logic, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sumologicexporter
-
-import (
-    "errors"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-)
-
-func TestDeduplicateErrors(t *testing.T) {
-    testCases := []struct {
-        name     string
-        errs     []error
-        expected []error
-    }{
-        {
-            name:     "nil is returned as nil",
-            errs:     nil,
-            expected: nil,
-        },
-        {
-            name: "single error is returned as-is",
-            errs: []error{
-                errors.New("Single error"),
-            },
-            expected: []error{
-                errors.New("Single error"),
-            },
-        },
-        {
-            name: "duplicates are removed",
-            errs: []error{
-                errors.New("failed sending data: 502 Bad Gateway"),
-                errors.New("failed sending data: 400 Bad Request"),
-                errors.New("failed sending data: 502 Bad Gateway"),
-                errors.New("failed sending data: 400 Bad Request"),
-                errors.New("failed sending data: 400 Bad Request"),
-                errors.New("failed sending data: 400 Bad Request"),
-                errors.New("failed sending data: 504 Gateway Timeout"),
-                errors.New("failed sending data: 502 Bad Gateway"),
-            },
-            expected: []error{
-                errors.New("failed sending data: 502 Bad Gateway (x3)"),
-                errors.New("failed sending data: 400 Bad Request (x4)"),
-                errors.New("failed sending data: 504 Gateway Timeout"),
-            },
-        },
-    }
-
-    for _, testCase := range testCases {
-        t.Run(testCase.name, func(t *testing.T) {
-            assert.Equal(t, testCase.expected, deduplicateErrors(testCase.errs))
-        })
-    }
-}
diff --git a/pkg/exporter/sumologicexporter/exporter.go b/pkg/exporter/sumologicexporter/exporter.go
deleted file mode 100644
index 008920f24b..0000000000
--- a/pkg/exporter/sumologicexporter/exporter.go
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2020 OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sumologicexporter
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "net/http"
-    "net/url"
-    "path"
-    "strings"
-    "sync"
-
-    "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/consumer/consumererror"
-    "go.opentelemetry.io/collector/exporter"
-    "go.opentelemetry.io/collector/exporter/exporterhelper"
-    "go.opentelemetry.io/collector/pdata/pcommon"
-    "go.opentelemetry.io/collector/pdata/plog"
-    "go.opentelemetry.io/collector/pdata/pmetric"
-    "go.opentelemetry.io/collector/pdata/ptrace"
-    "go.uber.org/zap"
-
-    "github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension"
-)
-
-const (
-    logsDataUrl    = "/api/v1/collector/logs"
-    metricsDataUrl = "/api/v1/collector/metrics"
-    tracesDataUrl  = "/api/v1/collector/traces"
-)
-
-type sumologicexporter struct {
-    config *Config
-    host   component.Host
-    logger *zap.Logger
-
-    clientLock sync.RWMutex
-    client     *http.Client
-
-    prometheusFormatter prometheusFormatter
-
-    // Lock around data URLs is needed because the reconfiguration of the exporter
-    // can happen asynchronously whenever the exporter is re-registering.
- dataUrlsLock sync.RWMutex - dataUrlMetrics string - dataUrlLogs string - dataUrlTraces string - - foundSumologicExtension bool - sumologicExtension *sumologicextension.SumologicExtension - - stickySessionCookieLock sync.RWMutex - stickySessionCookie string - - id component.ID -} - -func initExporter(cfg *Config, createSettings exporter.Settings) (*sumologicexporter, error) { - - pf, err := newPrometheusFormatter() - if err != nil { - return nil, err - } - - se := &sumologicexporter{ - config: cfg, - logger: createSettings.Logger, - // NOTE: client is now set in start() - prometheusFormatter: pf, - id: createSettings.ID, - foundSumologicExtension: false, - } - - se.logger.Info( - "Sumo Logic Exporter configured", - zap.String("log_format", string(cfg.LogFormat)), - zap.String("metric_format", string(cfg.MetricFormat)), - zap.String("trace_format", string(cfg.TraceFormat)), - ) - - return se, nil -} - -func newLogsExporter( - ctx context.Context, - params exporter.Settings, - cfg *Config, -) (exporter.Logs, error) { - se, err := initExporter(cfg, params) - if err != nil { - return nil, fmt.Errorf("failed to initialize the logs exporter: %w", err) - } - - return exporterhelper.NewLogsExporter( - ctx, - params, - cfg, - se.pushLogsData, - // Disable exporterhelper Timeout, since we are using a custom mechanism - // within exporter itself - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), - exporterhelper.WithRetry(cfg.BackOffConfig), - exporterhelper.WithQueue(cfg.QueueSettings), - exporterhelper.WithStart(se.start), - exporterhelper.WithShutdown(se.shutdown), - ) -} - -func newMetricsExporter( - ctx context.Context, - params exporter.Settings, - cfg *Config, -) (exporter.Metrics, error) { - se, err := initExporter(cfg, params) - if err != nil { - return nil, err - } - - return exporterhelper.NewMetricsExporter( - ctx, - params, - cfg, - se.pushMetricsData, - // Disable exporterhelper Timeout, since we are using a custom mechanism - // within exporter itself - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), - exporterhelper.WithRetry(cfg.BackOffConfig), - exporterhelper.WithQueue(cfg.QueueSettings), - exporterhelper.WithStart(se.start), - exporterhelper.WithShutdown(se.shutdown), - ) -} - -func newTracesExporter( - ctx context.Context, - params exporter.Settings, - cfg *Config, -) (exporter.Traces, error) { - se, err := initExporter(cfg, params) - if err != nil { - return nil, err - } - - return exporterhelper.NewTracesExporter( - ctx, - params, - cfg, - se.pushTracesData, - // Disable exporterhelper Timeout, since we are using a custom mechanism - // within exporter itself - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), - exporterhelper.WithRetry(cfg.BackOffConfig), - exporterhelper.WithQueue(cfg.QueueSettings), - exporterhelper.WithStart(se.start), - exporterhelper.WithShutdown(se.shutdown), - ) -} - -// pushLogsData groups data with common metadata and sends them as separate batched requests. 
-// It returns the number of unsent logs and an error which contains a list of dropped records
-// so they can be handled by the OTC retry mechanism.
-func (se *sumologicexporter) pushLogsData(ctx context.Context, ld plog.Logs) error {
-    logsUrl, metricsUrl, tracesUrl := se.getDataURLs()
-    sdr := newSender(
-        se.logger,
-        se.config,
-        se.getHTTPClient(),
-        se.prometheusFormatter,
-        metricsUrl,
-        logsUrl,
-        tracesUrl,
-        se.StickySessionCookie,
-        se.SetStickySessionCookie,
-        se.id,
-    )
-
-    // Follow different execution path for OTLP format
-    if sdr.config.LogFormat == OTLPLogFormat {
-        if err := sdr.sendOTLPLogs(ctx, ld); err != nil {
-            se.handleUnauthorizedErrors(ctx, err)
-            return consumererror.NewLogs(err, ld)
-        }
-        return nil
-    }
-
-    type droppedResourceRecords struct {
-        resource pcommon.Resource
-        records  []plog.LogRecord
-    }
-    var (
-        errs    []error
-        dropped []droppedResourceRecords
-    )
-
-    // Iterate over ResourceLogs
-    rls := ld.ResourceLogs()
-    for i := 0; i < rls.Len(); i++ {
-        rl := rls.At(i)
-
-        currentMetadata := newFields(rl.Resource().Attributes())
-
-        if droppedRecords, err := sdr.sendNonOTLPLogs(ctx, rl, currentMetadata); err != nil {
-            dropped = append(dropped, droppedResourceRecords{
-                resource: rl.Resource(),
-                records:  droppedRecords,
-            })
-            errs = append(errs, err)
-        }
-    }
-
-    if len(dropped) > 0 {
-        ld = plog.NewLogs()
-
-        // Copy all dropped records to Logs.
-        // NOTE: we only copy resource and log records here.
-        // Scope is not handled properly but it never was.
-        for i := range dropped {
-            rls := ld.ResourceLogs().AppendEmpty()
-            dropped[i].resource.CopyTo(rls.Resource())
-
-            for j := 0; j < len(dropped[i].records); j++ {
-                dropped[i].records[j].CopyTo(
-                    rls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty(),
-                )
-            }
-        }
-
-        errs = deduplicateErrors(errs)
-        se.handleUnauthorizedErrors(ctx, errs...)
-        return consumererror.NewLogs(errors.Join(errs...), ld)
-    }
-
-    return nil
-}
-
-// pushMetricsData groups data with common metadata and sends them as separate batched requests.
-// It returns the number of unsent metrics and an error which contains a list of dropped records
-// so they can be handled by the OTC retry mechanism.
-func (se *sumologicexporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error {
-    logsUrl, metricsUrl, tracesUrl := se.getDataURLs()
-    sdr := newSender(
-        se.logger,
-        se.config,
-        se.getHTTPClient(),
-        se.prometheusFormatter,
-        metricsUrl,
-        logsUrl,
-        tracesUrl,
-        se.StickySessionCookie,
-        se.SetStickySessionCookie,
-        se.id,
-    )
-
-    var droppedMetrics pmetric.Metrics
-    var errs []error
-    if sdr.config.MetricFormat == OTLPMetricFormat {
-        if err := sdr.sendOTLPMetrics(ctx, md); err != nil {
-            droppedMetrics = md
-            errs = []error{err}
-        }
-    } else {
-        droppedMetrics, errs = sdr.sendNonOTLPMetrics(ctx, md)
-    }
-
-    if len(errs) > 0 {
-        se.handleUnauthorizedErrors(ctx, errs...)
-        return consumererror.NewMetrics(errors.Join(errs...), droppedMetrics)
-    }
-
-    return nil
-}
-
-// handleUnauthorizedErrors checks if any of the provided errors is an unauthorized error.
-// In which case it triggers exporter reconfiguration which in turn takes the credentials
-// from sumologicextension which at this point should already detect the problem with
-// authorization (via heartbeats) and prepare new collector credentials to be available.
-func (se *sumologicexporter) handleUnauthorizedErrors(ctx context.Context, errs ...error) { - for _, err := range errs { - if errors.Is(err, errUnauthorized) { - se.logger.Warn("Received unauthorized status code, triggering reconfiguration") - if errC := se.configure(ctx); errC != nil { - se.logger.Error("Error configuring the exporter with new credentials", zap.Error(err)) - } else { - // It's enough to successfully reconfigure the exporter just once. - return - } - } - } -} - -func (se *sumologicexporter) pushTracesData(ctx context.Context, td ptrace.Traces) error { - logsUrl, metricsUrl, tracesUrl := se.getDataURLs() - sdr := newSender( - se.logger, - se.config, - se.getHTTPClient(), - se.prometheusFormatter, - metricsUrl, - logsUrl, - tracesUrl, - se.StickySessionCookie, - se.SetStickySessionCookie, - se.id, - ) - - err := sdr.sendTraces(ctx, td) - se.handleUnauthorizedErrors(ctx, err) - return err -} - -func (se *sumologicexporter) start(ctx context.Context, host component.Host) error { - se.host = host - return se.configure(ctx) -} - -func (se *sumologicexporter) configure(ctx context.Context) error { - var ( - ext *sumologicextension.SumologicExtension - foundSumoExt bool - ) - - if se.config.CompressEncoding != NoCompression { - se.config.ClientConfig.Compression = se.config.CompressEncoding - } - - httpSettings := se.config.ClientConfig - - for _, e := range se.host.GetExtensions() { - v, ok := e.(*sumologicextension.SumologicExtension) - if ok && httpSettings.Auth.AuthenticatorID == v.ComponentID() { - ext = v - foundSumoExt = true - se.foundSumologicExtension = true - se.sumologicExtension = ext - break - } - } - - if httpSettings.Endpoint == "" && httpSettings.Auth != nil && - httpSettings.Auth.AuthenticatorID.Type() == sumologicextension.NewFactory().Type() { - // If user specified using sumologicextension as auth but none was - // found then return an error. - if !foundSumoExt { - return fmt.Errorf( - "sumologic was specified as auth extension (named: %q) but "+ - "a matching extension was not found in the config, "+ - "please re-check the config and/or define the sumologicextension", - httpSettings.Auth.AuthenticatorID.String(), - ) - } - - // If we're using sumologicextension as authentication extension and - // endpoint was not set then send data on a collector generic ingest URL - // with authentication set by sumologicextension. - - u, err := url.Parse(ext.BaseURL()) - if err != nil { - return fmt.Errorf("failed to parse API base URL from sumologicextension: %w", err) - } - - logsUrl := *u - logsUrl.Path = logsDataUrl - metricsUrl := *u - metricsUrl.Path = metricsDataUrl - tracesUrl := *u - tracesUrl.Path = tracesDataUrl - se.setDataURLs(logsUrl.String(), metricsUrl.String(), tracesUrl.String()) - - } else if httpSettings.Endpoint != "" { - logsUrl, err := getSignalURL(se.config, httpSettings.Endpoint, component.DataTypeLogs) - if err != nil { - return err - } - metricsUrl, err := getSignalURL(se.config, httpSettings.Endpoint, component.DataTypeMetrics) - if err != nil { - return err - } - tracesUrl, err := getSignalURL(se.config, httpSettings.Endpoint, component.DataTypeTraces) - if err != nil { - return err - } - se.setDataURLs(logsUrl, metricsUrl, tracesUrl) - - // Clean authenticator if set to sumologic. - // Setting to null in configuration doesn't work, so we have to force it that way. 
- if httpSettings.Auth != nil && httpSettings.Auth.AuthenticatorID.Type() == sumologicextension.NewFactory().Type() { - httpSettings.Auth = nil - } - } else { - return fmt.Errorf("no auth extension and no endpoint specified") - } - - client, err := httpSettings.ToClient(ctx, se.host, component.TelemetrySettings{}) - if err != nil { - return fmt.Errorf("failed to create HTTP Client: %w", err) - } - - se.setHTTPClient(client) - return nil -} - -func (se *sumologicexporter) setHTTPClient(client *http.Client) { - se.clientLock.Lock() - se.client = client - se.clientLock.Unlock() -} - -func (se *sumologicexporter) getHTTPClient() *http.Client { - se.clientLock.RLock() - defer se.clientLock.RUnlock() - return se.client -} - -func (se *sumologicexporter) setDataURLs(logs, metrics, traces string) { - se.dataUrlsLock.Lock() - se.logger.Info("setting data urls", zap.String("logs_url", logs), zap.String("metrics_url", metrics), zap.String("traces_url", traces)) - se.dataUrlLogs, se.dataUrlMetrics, se.dataUrlTraces = logs, metrics, traces - se.dataUrlsLock.Unlock() -} - -func (se *sumologicexporter) getDataURLs() (logs, metrics, traces string) { - se.dataUrlsLock.RLock() - defer se.dataUrlsLock.RUnlock() - return se.dataUrlLogs, se.dataUrlMetrics, se.dataUrlTraces -} - -func (se *sumologicexporter) shutdown(context.Context) error { - return nil -} - -func (se *sumologicexporter) StickySessionCookie() string { - if se.foundSumologicExtension { - return se.sumologicExtension.StickySessionCookie() - } else { - se.stickySessionCookieLock.RLock() - defer se.stickySessionCookieLock.RUnlock() - return se.stickySessionCookie - } -} - -func (se *sumologicexporter) SetStickySessionCookie(stickySessionCookie string) { - if se.foundSumologicExtension { - se.sumologicExtension.SetStickySessionCookie(stickySessionCookie) - } else { - se.stickySessionCookieLock.Lock() - se.stickySessionCookie = stickySessionCookie - se.stickySessionCookieLock.Unlock() - } -} - -// get the destination url for a given signal type -// this mostly adds signal-specific suffixes if the format is otlp -func getSignalURL(oCfg *Config, endpointUrl string, signal component.DataType) (string, error) { - url, err := url.Parse(endpointUrl) - if err != nil { - return "", err - } - - switch signal { - case component.DataTypeLogs: - if oCfg.LogFormat != "otlp" { - return url.String(), nil - } - case component.DataTypeMetrics: - if oCfg.MetricFormat != "otlp" { - return url.String(), nil - } - case component.DataTypeTraces: - default: - return "", fmt.Errorf("unknown signal type: %s", signal) - } - - signalUrlSuffix := fmt.Sprintf("/v1/%s", signal) - if !strings.HasSuffix(url.Path, signalUrlSuffix) { - url.Path = path.Join(url.Path, signalUrlSuffix) - } - - return url.String(), nil -} diff --git a/pkg/exporter/sumologicexporter/exporter_test.go b/pkg/exporter/sumologicexporter/exporter_test.go deleted file mode 100644 index 7ab397c0d0..0000000000 --- a/pkg/exporter/sumologicexporter/exporter_test.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sumologicexporter
-
-import (
-    "context"
-    "errors"
-    "net/http"
-    "net/http/httptest"
-    "sync"
-    "sync/atomic"
-    "testing"
-    "time"
-
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-    "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/component/componenttest"
-    "go.opentelemetry.io/collector/config/configcompression"
-    "go.opentelemetry.io/collector/config/confighttp"
-    "go.opentelemetry.io/collector/config/configtls"
-    "go.opentelemetry.io/collector/consumer/consumererror"
-    "go.opentelemetry.io/collector/exporter/exportertest"
-    "go.opentelemetry.io/collector/pdata/pcommon"
-    "go.opentelemetry.io/collector/pdata/plog"
-    "go.opentelemetry.io/collector/pdata/pmetric"
-    "go.opentelemetry.io/collector/pdata/ptrace"
-)
-
-func logRecordsToLogs(records []plog.LogRecord) plog.Logs {
-    logs := plog.NewLogs()
-    logsSlice := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords()
-    for _, record := range records {
-        record.CopyTo(logsSlice.AppendEmpty())
-    }
-
-    return logs
-}
-
-type exporterTest struct {
-    srv        *httptest.Server
-    exp        *sumologicexporter
-    reqCounter *int32
-}
-
-func createTestConfig() *Config {
-    config := createDefaultConfig().(*Config)
-    config.ClientConfig.Compression = NoCompression
-    config.LogFormat = TextFormat
-    config.MaxRequestBodySize = 20_971_520
-    config.MetricFormat = OTLPMetricFormat
-    return config
-}
-
-// prepareExporterTest prepares an exporter test object using the provided config
-// and a slice of callbacks to be called for subsequent requests being sent to the server.
-// The enclosed *httptest.Server is automatically closed on test cleanup.
-func prepareExporterTest(t *testing.T, cfg *Config, cb []func(w http.ResponseWriter, req *http.Request)) *exporterTest { - var reqCounter int32 - // generate a test server so we can capture and inspect the request - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - c := int(atomic.LoadInt32(&reqCounter)) - if assert.Greaterf(t, len(cb), c, "Exporter sent more requests (%d) than the number of test callbacks defined: %d", c+1, len(cb)) { - cb[c](w, req) - atomic.AddInt32(&reqCounter, 1) - } - })) - t.Cleanup(func() { - testServer.Close() - - // Ensure we got all required requests - assert.Eventuallyf(t, func() bool { - return int(atomic.LoadInt32(&reqCounter)) == len(cb) - }, 2*time.Second, 100*time.Millisecond, - "HTTP server didn't receive all the expected requests; got: %d, expected: %d", - atomic.LoadInt32(&reqCounter), len(cb), - ) - }) - - cfg.ClientConfig.Endpoint = testServer.URL - cfg.ClientConfig.Auth = nil - - exp, err := initExporter(cfg, exportertest.NewNopSettings()) - require.NoError(t, err) - - require.NoError(t, exp.start(context.Background(), componenttest.NewNopHost())) - - return &exporterTest{ - srv: testServer, - exp: exp, - reqCounter: &reqCounter, - } -} - -func TestAllSuccess(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, `Example log`, body) - assert.Equal(t, "", req.Header.Get("X-Sumo-Fields")) - }, - }) - - logs := logRecordsToLogs(exampleLog()) - logs.MarkReadOnly() - - err := test.exp.pushLogsData(context.Background(), logs) - assert.NoError(t, err) -} - -func TestLogsResourceAttributesSentAsFields(t *testing.T) { - testcases := []struct { - name string - configFunc func() *Config - callbacks []func(w http.ResponseWriter, req *http.Request) - logsFunc func() plog.Logs - }{ - { - name: "text", - configFunc: func() *Config { - config := createTestConfig() - config.LogFormat = TextFormat - return config - }, - callbacks: []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log\nAnother example log", body) - assert.Equal(t, "res_attr1=1, res_attr2=2", req.Header.Get("X-Sumo-Fields")) - }, - }, - logsFunc: func() plog.Logs { - buffer := make([]plog.LogRecord, 2) - buffer[0] = plog.NewLogRecord() - buffer[0].Body().SetStr("Example log") - buffer[0].Attributes().PutStr("key1", "value1") - buffer[0].Attributes().PutStr("key2", "value2") - buffer[1] = plog.NewLogRecord() - buffer[1].Body().SetStr("Another example log") - buffer[1].Attributes().PutStr("key1", "value1") - buffer[1].Attributes().PutStr("key2", "value2") - buffer[1].Attributes().PutStr("key3", "value3") - - logs := logRecordsToLogs(buffer) - logs.ResourceLogs().At(0).Resource().Attributes().PutStr("res_attr1", "1") - logs.ResourceLogs().At(0).Resource().Attributes().PutStr("res_attr2", "2") - logs.MarkReadOnly() - return logs - }, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - cfg := tc.configFunc() - test := prepareExporterTest(t, cfg, tc.callbacks) - - logs := tc.logsFunc() - assert.NoError(t, test.exp.pushLogsData(context.Background(), logs)) - assert.EqualValues(t, len(tc.callbacks), atomic.LoadInt32(test.reqCounter)) - }) - } -} - -func TestAllFailed(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w 
http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - assert.Equal(t, "Example log\nAnother example log", body) - assert.Empty(t, req.Header.Get("X-Sumo-Fields")) - }, - }) - - logs := plog.NewLogs() - logsSlice := logs.ResourceLogs().AppendEmpty() - logsRecords1 := logsSlice.ScopeLogs().AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - - logsRecords2 := logsSlice.ScopeLogs().AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - logs.MarkReadOnly() - - logsExpected := plog.NewLogs() - logsSlice.CopyTo(logsExpected.ResourceLogs().AppendEmpty()) - - err := test.exp.pushLogsData(context.Background(), logs) - assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error") - - var partial consumererror.Logs - require.True(t, errors.As(err, &partial)) - assert.Equal(t, logsExpected, partial.Data()) -} - -func TestPartiallyFailed(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log", body) - // No resource attributes for those logs hence no fields - assert.Empty(t, req.Header.Get("X-Sumo-Fields")) - }, - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - assert.Equal(t, "Another example log", body) - // No resource attributes for those logs hence no fields - assert.Empty(t, req.Header.Get("X-Sumo-Fields")) - }, - }) - - logs := plog.NewLogs() - logsSlice1 := logs.ResourceLogs().AppendEmpty() - logsRecords1 := logsSlice1.ScopeLogs().AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsSlice2 := logs.ResourceLogs().AppendEmpty() - logsRecords2 := logsSlice2.ScopeLogs().AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - logs.MarkReadOnly() - - logsExpected := plog.NewLogs() - logsSlice2.CopyTo(logsExpected.ResourceLogs().AppendEmpty()) - - err := test.exp.pushLogsData(context.Background(), logs) - assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error") - - var partial consumererror.Logs - require.True(t, errors.As(err, &partial)) - assert.Equal(t, logsExpected, partial.Data()) -} - -func TestInvalidHTTPCLient(t *testing.T) { - exp, err := initExporter(&Config{ - ClientConfig: confighttp.ClientConfig{ - Endpoint: "test_endpoint", - TLSSetting: configtls.ClientConfig{ - Config: configtls.Config{ - MinVersion: "invalid", - }, - }, - }, - }, exportertest.NewNopSettings()) - require.NoError(t, err) - - assert.EqualError(t, - exp.start(context.Background(), componenttest.NewNopHost()), - "failed to create HTTP Client: failed to load TLS config: invalid TLS min_version: unsupported TLS version: \"invalid\"", - ) -} - -func TestPushLogs_DontRemoveSourceAttributes(t *testing.T) { - createLogs := func() plog.Logs { - logs := plog.NewLogs() - resourceLogs := logs.ResourceLogs().AppendEmpty() - logsSlice := resourceLogs.ScopeLogs().AppendEmpty().LogRecords() - - logRecords := make([]plog.LogRecord, 2) - logRecords[0] = plog.NewLogRecord() - logRecords[0].Body().SetStr("Example log aaaaaaaaaaaaaaaaaaaaaa 1") - logRecords[0].CopyTo(logsSlice.AppendEmpty()) - logRecords[1] = plog.NewLogRecord() - logRecords[1].Body().SetStr("Example log aaaaaaaaaaaaaaaaaaaaaa 2") - 
logRecords[1].CopyTo(logsSlice.AppendEmpty()) - - resourceAttrs := resourceLogs.Resource().Attributes() - resourceAttrs.PutStr("hostname", "my-host-name") - resourceAttrs.PutStr("hosttype", "my-host-type") - resourceAttrs.PutStr("_sourceCategory", "my-source-category") - resourceAttrs.PutStr("_sourceHost", "my-source-host") - resourceAttrs.PutStr("_sourceName", "my-source-name") - logs.MarkReadOnly() - - return logs - } - - callbacks := []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log aaaaaaaaaaaaaaaaaaaaaa 1", body) - assert.Equal(t, "hostname=my-host-name, hosttype=my-host-type", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "my-source-category", req.Header.Get("X-Sumo-Category")) - assert.Equal(t, "my-source-host", req.Header.Get("X-Sumo-Host")) - assert.Equal(t, "my-source-name", req.Header.Get("X-Sumo-Name")) - for k, v := range req.Header { - t.Logf("request #1 header: %v=%v", k, v) - } - }, - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log aaaaaaaaaaaaaaaaaaaaaa 2", body) - assert.Equal(t, "hostname=my-host-name, hosttype=my-host-type", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "my-source-category", req.Header.Get("X-Sumo-Category")) - assert.Equal(t, "my-source-host", req.Header.Get("X-Sumo-Host")) - assert.Equal(t, "my-source-name", req.Header.Get("X-Sumo-Name")) - for k, v := range req.Header { - t.Logf("request #2 header: %v=%v", k, v) - } - }, - } - - config := createTestConfig() - config.LogFormat = TextFormat - config.MaxRequestBodySize = 32 - - test := prepareExporterTest(t, config, callbacks) - assert.NoError(t, test.exp.pushLogsData(context.Background(), createLogs())) -} - -func TestAllMetricsSuccess(t *testing.T) { - testcases := []struct { - name string - expectedBody string - metricFunc func() (pmetric.Metric, pcommon.Map) - }{ - { - name: "sum", - expectedBody: `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000`, - metricFunc: exampleIntMetric, - }, - { - name: "gauge", - expectedBody: `gauge_metric_name{foo="bar",remote_name="156920",url="http://example_url"} 124 1608124661166 -gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1608124662166`, - metricFunc: exampleIntGaugeMetric, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, tc.expectedBody, body) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }) - test.exp.config.MetricFormat = PrometheusFormat - - metric := metricAndAttributesToPdataMetrics(tc.metricFunc()) - metric.MarkReadOnly() - - err := test.exp.pushMetricsData(context.Background(), metric) - assert.NoError(t, err) - }) - } -} - -func TestAllMetricsOTLP(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - - md, err := (&pmetric.ProtoUnmarshaler{}).UnmarshalMetrics([]byte(body)) - assert.NoError(t, err) - assert.NotNil(t, md) - - //nolint:lll - expected := 
"\nf\n/\n\x14\n\x04test\x12\f\n\ntest_value\n\x17\n\x05test2\x12\x0e\n\fsecond_value\x123\n\x00\x12/\n\x10test.metric.data\x1a\x05bytes:\x14\n\x12\x19\x00\x12\x94\v\xd1\x00H\x161\xa48\x00\x00\x00\x00\x00\x00\n\xc2\x01\n\x0e\n\f\n\x03foo\x12\x05\n\x03bar\x12\xaf\x01\n\x00\x12\xaa\x01\n\x11gauge_metric_name*\x94\x01\nH\x19\x80GX\xef\xdb4Q\x161|\x00\x00\x00\x00\x00\x00\x00:\x17\n\vremote_name\x12\b\n\x06156920:\x1b\n\x03url\x12\x14\n\x12http://example_url\nH\x19\x80\x11\xf3*\xdc4Q\x161\xf5\x00\x00\x00\x00\x00\x00\x00:\x17\n\vremote_name\x12\b\n\x06156955:\x1b\n\x03url\x12\x14\n\x12http://another_url" - assert.Equal(t, expected, body) - assert.Equal(t, "application/x-protobuf", req.Header.Get("Content-Type")) - }, - }) - test.exp.config.MetricFormat = OTLPMetricFormat - - metricSum, attrsSum := exampleIntMetric() - metricGauge, attrsGauge := exampleIntGaugeMetric() - metrics := metricPairToMetrics( - metricPair{ - attributes: attrsSum, - metric: metricSum, - }, - metricPair{ - attributes: attrsGauge, - metric: metricGauge, - }, - ) - - err := test.exp.pushMetricsData(context.Background(), metrics) - assert.NoError(t, err) -} - -func TestAllMetricsFailed(t *testing.T) { - testcases := []struct { - name string - callbacks []func(w http.ResponseWriter, req *http.Request) - metricFunc func() pmetric.Metrics - expectedError string - }{ - { - name: "sent together when metrics under the same resource", - callbacks: []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000 -gauge_metric_name{test="test_value",test2="second_value",remote_name="156920",url="http://example_url"} 124 1608124661166 -gauge_metric_name{test="test_value",test2="second_value",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }, - metricFunc: func() pmetric.Metrics { - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - metrics := metricAndAttrsToPdataMetrics( - attrs, - metricSum, metricGauge, - ) - metrics.MarkReadOnly() - return metrics - }, - expectedError: "failed sending data: status: 500 Internal Server Error", - }, - { - name: "sent together when metrics under different resources", - callbacks: []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000 -gauge_metric_name{foo="bar",remote_name="156920",url="http://example_url"} 124 1608124661166 -gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }, - metricFunc: func() pmetric.Metrics { - metricSum, attrsSum := exampleIntMetric() - metricGauge, attrsGauge := exampleIntGaugeMetric() - metrics := metricPairToMetrics( - metricPair{ - attributes: attrsSum, - metric: metricSum, - }, - metricPair{ - attributes: attrsGauge, - metric: metricGauge, - }, - ) - return metrics - }, - expectedError: "failed sending data: status: 500 Internal Server Error", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - test := 
prepareExporterTest(t, createTestConfig(), tc.callbacks) - test.exp.config.MetricFormat = PrometheusFormat - - metrics := tc.metricFunc() - err := test.exp.pushMetricsData(context.Background(), metrics) - - assert.EqualError(t, err, tc.expectedError) - - var partial consumererror.Metrics - require.True(t, errors.As(err, &partial)) - // TODO fix - // assert.Equal(t, metrics, partial.GetMetrics()) - }) - } -} - -func TestMetricsPrometheusFormatMetadataFilter(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - func(_ http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value",key1="value1",key2="value2"} 14500 1605534165000` - assert.Equal(t, expected, body) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }) - test.exp.config.MetricFormat = PrometheusFormat - - metrics := metricAndAttributesToPdataMetrics(exampleIntMetric()) - - attrs := metrics.ResourceMetrics().At(0).Resource().Attributes() - attrs.PutStr("key1", "value1") - attrs.PutStr("key2", "value2") - - metrics.MarkReadOnly() - - err := test.exp.pushMetricsData(context.Background(), metrics) - assert.NoError(t, err) -} - -func Benchmark_ExporterPushLogs(b *testing.B) { - createConfig := func() *Config { - config := createDefaultConfig().(*Config) - config.MetricFormat = PrometheusFormat - config.LogFormat = TextFormat - config.ClientConfig.Auth = nil - config.ClientConfig.Compression = configcompression.TypeGzip - return config - } - - testServer := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { - })) - b.Cleanup(func() { testServer.Close() }) - - cfg := createConfig() - cfg.ClientConfig.Endpoint = testServer.URL - - exp, err := initExporter(cfg, exportertest.NewNopSettings()) - require.NoError(b, err) - require.NoError(b, exp.start(context.Background(), componenttest.NewNopHost())) - defer func() { - require.NoError(b, exp.shutdown(context.Background())) - }() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - logs := logRecordsToLogs(exampleNLogs(128)) - logs.MarkReadOnly() - err := exp.pushLogsData(context.Background(), logs) - if err != nil { - b.Logf("Failed pushing logs: %v", err) - } - wg.Done() - }() - } - - wg.Wait() - } -} - -func TestSendEmptyLogsOTLP(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - // No request is sent - }) - - logs := plog.NewLogs() - logs.MarkReadOnly() - - err := test.exp.pushLogsData(context.Background(), logs) - assert.NoError(t, err) -} - -func TestSendEmptyMetricsOTLP(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - // No request is sent - }) - test.exp.config.MetricFormat = OTLPMetricFormat - - metrics := metricPairToMetrics() - - err := test.exp.pushMetricsData(context.Background(), metrics) - assert.NoError(t, err) -} - -func TestSendEmptyTraces(t *testing.T) { - test := prepareExporterTest(t, createTestConfig(), []func(w http.ResponseWriter, req *http.Request){ - // No request is sent - }) - - traces := ptrace.NewTraces() - - err := test.exp.pushTracesData(context.Background(), traces) - assert.NoError(t, err) -} - -func TestGetSignalURL(t *testing.T) { - testCases := []struct { - description string - signalType component.Type - cfg Config - 
endpointURL string - expected string - errorMessage string - }{ - { - description: "no change if log format not otlp", - signalType: component.DataTypeLogs, - cfg: Config{LogFormat: TextFormat}, - endpointURL: "http://localhost", - expected: "http://localhost", - }, - { - description: "no change if metric format not otlp", - signalType: component.DataTypeMetrics, - cfg: Config{MetricFormat: PrometheusFormat}, - endpointURL: "http://localhost", - expected: "http://localhost", - }, - { - description: "always add suffix for traces if not present", - signalType: component.DataTypeTraces, - endpointURL: "http://localhost", - expected: "http://localhost/v1/traces", - }, - { - description: "always add suffix for logs if not present", - signalType: component.DataTypeLogs, - cfg: Config{LogFormat: OTLPLogFormat}, - endpointURL: "http://localhost", - expected: "http://localhost/v1/logs", - }, - { - description: "always add suffix for metrics if not present", - signalType: component.DataTypeMetrics, - cfg: Config{MetricFormat: OTLPMetricFormat}, - endpointURL: "http://localhost", - expected: "http://localhost/v1/metrics", - }, - { - description: "no change if suffix already present", - signalType: component.DataTypeTraces, - endpointURL: "http://localhost/v1/traces", - expected: "http://localhost/v1/traces", - }, - { - description: "error if url invalid", - signalType: component.DataTypeTraces, - endpointURL: ":", - errorMessage: `parse ":": missing protocol scheme`, - }, - { - description: "error if signal type is unknown", - signalType: component.MustNewType("unknown"), - endpointURL: "http://localhost", - errorMessage: `unknown signal type: unknown`, - }, - } - for _, tC := range testCases { - testCase := tC - t.Run(tC.description, func(t *testing.T) { - actual, err := getSignalURL(&testCase.cfg, testCase.endpointURL, testCase.signalType) - if testCase.errorMessage != "" { - require.Error(t, err) - require.EqualError(t, err, testCase.errorMessage) - } else { - require.NoError(t, err) - } - require.Equal(t, testCase.expected, actual) - }) - } -} diff --git a/pkg/exporter/sumologicexporter/factory.go b/pkg/exporter/sumologicexporter/factory.go deleted file mode 100644 index 1db0297e19..0000000000 --- a/pkg/exporter/sumologicexporter/factory.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sumologicexporter - -import ( - "context" - "fmt" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper" -) - -const ( - // The value of "type" key in configuration. - typeStr = "sumologic" - stabilityLevel = component.StabilityLevelDeprecated -) - -var Type = component.MustNewType(typeStr) - -// NewFactory returns a new factory for the sumologic exporter. 
-func NewFactory() exporter.Factory { - return exporter.NewFactory( - Type, - createDefaultConfig, - exporter.WithLogs(createLogsExporter, stabilityLevel), - exporter.WithMetrics(createMetricsExporter, stabilityLevel), - exporter.WithTraces(createTracesExporter, stabilityLevel), - ) -} - -func createDefaultConfig() component.Config { - qs := exporterhelper.NewDefaultQueueSettings() - qs.Enabled = false - - return &Config{ - MaxRequestBodySize: DefaultMaxRequestBodySize, - LogFormat: DefaultLogFormat, - MetricFormat: DefaultMetricFormat, - Client: DefaultClient, - TraceFormat: OTLPTraceFormat, - - ClientConfig: CreateDefaultClientConfig(), - BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: qs, - StickySessionEnabled: DefaultStickySessionEnabled, - } -} - -func createLogsExporter( - ctx context.Context, - params exporter.Settings, - cfg component.Config, -) (exporter.Logs, error) { - exp, err := newLogsExporter(ctx, params, cfg.(*Config)) - if err != nil { - return nil, fmt.Errorf("failed to create the logs exporter: %w", err) - } - - return exp, nil -} - -func createMetricsExporter( - ctx context.Context, - params exporter.Settings, - cfg component.Config, -) (exporter.Metrics, error) { - exp, err := newMetricsExporter(ctx, params, cfg.(*Config)) - if err != nil { - return nil, fmt.Errorf("failed to create the metrics exporter: %w", err) - } - - return exp, nil -} - -func createTracesExporter( - ctx context.Context, - params exporter.Settings, - cfg component.Config, -) (exporter.Traces, error) { - exp, err := newTracesExporter(ctx, params, cfg.(*Config)) - if err != nil { - return nil, fmt.Errorf("failed to create the traces exporter: %w", err) - } - - return exp, nil -} diff --git a/pkg/exporter/sumologicexporter/factory_test.go b/pkg/exporter/sumologicexporter/factory_test.go deleted file mode 100644 index f9e828d050..0000000000 --- a/pkg/exporter/sumologicexporter/factory_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
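The factory deleted above was the registration point for the in-repo `sumologic` exporter. For custom collector builds that imported it, switching to the upstream component is an import swap; a minimal sketch, assuming the contrib module path and its exported `NewFactory()` (the upstream package keeps the same `sumologic` component type):

```go
package components // illustrative build glue, not part of this patch

import (
	// was: github.com/SumoLogic/sumologic-otel-collector/pkg/exporter/sumologicexporter
	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter"

	"go.opentelemetry.io/collector/exporter"
)

// Factories returns the exporter factories for a custom collector build. The
// upstream package keeps the same "sumologic" component type and NewFactory()
// entry point as the in-repo copy deleted here.
func Factories() []exporter.Factory {
	return []exporter.Factory{sumologicexporter.NewFactory()}
}
```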
- -package sumologicexporter - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configauth" - "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/exporter/exporterhelper" -) - -func TestType(t *testing.T) { - factory := NewFactory() - pType := factory.Type() - assert.Equal(t, pType, Type) -} - -func TestCreateDefaultConfig(t *testing.T) { - factory := NewFactory() - cfg := factory.CreateDefaultConfig() - qs := exporterhelper.NewDefaultQueueSettings() - qs.Enabled = false - - assert.Equal(t, cfg, &Config{ - MaxRequestBodySize: 1_048_576, - LogFormat: "otlp", - MetricFormat: "otlp", - Client: "otelcol", - TraceFormat: "otlp", - - ClientConfig: confighttp.ClientConfig{ - Timeout: 30 * time.Second, - Compression: "gzip", - Auth: &configauth.Authentication{ - AuthenticatorID: component.NewID(Type), - }, - }, - BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: qs, - }) - - assert.NoError(t, component.ValidateConfig(cfg)) -} diff --git a/pkg/exporter/sumologicexporter/fields.go b/pkg/exporter/sumologicexporter/fields.go deleted file mode 100644 index 87695143db..0000000000 --- a/pkg/exporter/sumologicexporter/fields.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sumologicexporter - -import ( - "strings" - - "go.opentelemetry.io/collector/pdata/pcommon" - "golang.org/x/exp/slices" -) - -// fields represents metadata -type fields struct { - orig pcommon.Map - initialized bool -} - -func newFields(attrMap pcommon.Map) fields { - return fields{ - orig: attrMap, - initialized: true, - } -} - -func (f fields) isInitialized() bool { - return f.initialized -} - -// string returns fields as ordered key=value string with `, ` as separator -func (f fields) string() string { - if !f.initialized { - return "" - } - - returnValue := make([]string, 0, f.orig.Len()) - - f.orig.Range(func(k string, v pcommon.Value) bool { - // Don't add source related attributes to fields as they are handled separately - // and are added to the payload either as special HTTP headers or as resources - // attributes. - if k == attributeKeySourceCategory || k == attributeKeySourceHost || k == attributeKeySourceName { - return true - } - - sv := v.AsString() - - // Skip empty field - if len(sv) == 0 { - return true - } - - key := []byte(k) - f.sanitizeField(key) - value := []byte(sv) - f.sanitizeField(value) - sb := strings.Builder{} - sb.Grow(len(key) + len(value) + 1) - sb.Write(key) - sb.WriteRune('=') - sb.Write(value) - - returnValue = append( - returnValue, - sb.String(), - ) - return true - }) - slices.Sort(returnValue) - - return strings.Join(returnValue, ", ") -} - -// sanitizeFields sanitize field (key or value) to be correctly parsed by sumologic receiver -// It modifies the field in place. 
-func (f fields) sanitizeField(fld []byte) { - for i := 0; i < len(fld); i++ { - switch fld[i] { - case ',': - fld[i] = '_' - case '=': - fld[i] = ':' - case '\n': - fld[i] = '_' - default: - } - } -} diff --git a/pkg/exporter/sumologicexporter/fields_test.go b/pkg/exporter/sumologicexporter/fields_test.go deleted file mode 100644 index c9198b85f3..0000000000 --- a/pkg/exporter/sumologicexporter/fields_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sumologicexporter - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/pdata/pcommon" -) - -func TestFields(t *testing.T) { - testcases := []struct { - name string - fields map[string]string - expected string - }{ - { - name: "string", - fields: map[string]string{ - "key1": "value1", - "key3": "value3", - "key2": "value2", - }, - expected: "key1=value1, key2=value2, key3=value3", - }, - { - name: "sanitization", - fields: map[string]string{ - "key1": "value,1", - "key3": "value\n3", - "key=,2": "valu,e=2", - }, - expected: "key1=value_1, key3=value_3, key:_2=valu_e:2", - }, - { - name: "empty element", - fields: map[string]string{ - "key1": "value1", - "key3": "value3", - "key2": "", - }, - expected: "key1=value1, key3=value3", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - flds := fieldsFromMap(tc.fields) - - assert.Equal(t, tc.expected, flds.string()) - }) - } -} - -func BenchmarkFields(b *testing.B) { - attrMap := pcommon.NewMap() - flds := map[string]interface{}{ - "key1": "value1", - "key3": "value3", - "key2": "", - "map": map[string]string{ - "key1": "value1", - "key3": "value3", - "key2": "", - }, - } - for k, v := range flds { - switch v := v.(type) { - case string: - attrMap.PutStr(k, v) - case map[string]string: - m := pcommon.NewValueMap() - mm := m.Map().AsRaw() - for kk, vv := range v { - mm[kk] = vv - } - m.CopyTo(attrMap.PutEmpty(k)) - } - } - sut := newFields(attrMap) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = sut.string() - } -} diff --git a/pkg/exporter/sumologicexporter/filter.go b/pkg/exporter/sumologicexporter/filter.go deleted file mode 100644 index 828aeff3b5..0000000000 --- a/pkg/exporter/sumologicexporter/filter.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
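For reference while auditing the removal, a minimal usage sketch of the deleted `fields` helper, matching the behavior pinned by the tests above: sorted `key=value` pairs joined with `, `, with `,` and newlines sanitized to `_` and `=` to `:`, empty values and `_source*` attributes skipped. This is a hypothetical example that assumes it compiles inside the deleted package, since `newFields` was unexported:

```go
package sumologicexporter // sketch; relies on the unexported newFields defined above

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func sketchFieldsString() {
	attrs := pcommon.NewMap()
	attrs.PutStr("host", "web-1")
	attrs.PutStr("team", "a,b")      // ',' in the value is sanitized to '_'
	attrs.PutStr("_sourceName", "x") // _source* attributes are skipped; they travel as HTTP headers instead

	f := newFields(attrs)
	fmt.Println(f.string()) // prints: host=web-1, team=a_b
}
```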
- -package sumologicexporter - -import ( - "regexp" - - "go.opentelemetry.io/collector/pdata/pcommon" -) - -type filter struct { - regexes []*regexp.Regexp -} - -func newFilter(flds []string) (filter, error) { - metadataRegexes := make([]*regexp.Regexp, len(flds)) - - for i, fld := range flds { - regex, err := regexp.Compile(fld) - if err != nil { - return filter{}, err - } - - metadataRegexes[i] = regex - } - - return filter{ - regexes: metadataRegexes, - }, nil -} - -// filterIn returns fields which match at least one of the filter regexes -func (f *filter) filterIn(attributes pcommon.Map) fields { - returnValue := pcommon.NewMap() - - attributes.Range(func(k string, v pcommon.Value) bool { - for _, regex := range f.regexes { - if regex.MatchString(k) { - v.CopyTo(returnValue.PutEmpty(k)) - return true - } - } - return true - }) - - return newFields(returnValue) -} - -// filterOut returns fields which don't match any of the filter regexes -func (f *filter) filterOut(attributes pcommon.Map) fields { - returnValue := pcommon.NewMap() - - attributes.Range(func(k string, v pcommon.Value) bool { - for _, regex := range f.regexes { - if regex.MatchString(k) { - return true - } - } - v.CopyTo(returnValue.PutEmpty(k)) - return true - }) - - return newFields(returnValue) -} diff --git a/pkg/exporter/sumologicexporter/filter_test.go b/pkg/exporter/sumologicexporter/filter_test.go deleted file mode 100644 index 011a1d3227..0000000000 --- a/pkg/exporter/sumologicexporter/filter_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
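A similar sketch for the deleted `filter` type, mirroring the two tests that follow; again assumed to sit inside the deleted package because `newFilter`, `filterIn`, and `filterOut` were unexported:

```go
package sumologicexporter // sketch; relies on the unexported filter helpers defined above

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func sketchFilter() {
	// Each entry must be a valid Go regexp; newFilter returns the compile error otherwise.
	f, err := newFilter([]string{"^key[12]", "^key3"})
	if err != nil {
		log.Fatal(err)
	}

	attrs := pcommon.NewMap()
	attrs.PutStr("key1", "value1")
	attrs.PutStr("additional_key2", "value2")

	fmt.Println(f.filterIn(attrs).string())  // key1=value1 (keys matching at least one regex)
	fmt.Println(f.filterOut(attrs).string()) // additional_key2=value2 (keys matching none)
}
```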
- -package sumologicexporter - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pcommon" -) - -func TestGetMetadata(t *testing.T) { - attributes := pcommon.NewMap() - attributes.PutStr("key3", "value3") - attributes.PutStr("key1", "value1") - attributes.PutStr("key2", "value2") - attributes.PutStr("additional_key2", "value2") - attributes.PutStr("additional_key3", "value3") - - regexes := []string{"^key[12]", "^key3"} - f, err := newFilter(regexes) - require.NoError(t, err) - - metadata := f.filterIn(attributes) - expected := fieldsFromMap(map[string]string{ - "key1": "value1", - "key2": "value2", - "key3": "value3", - }) - // Use string() because object comparison has not been reliable - assert.Equal(t, expected.string(), metadata.string()) -} - -func TestFilterOutMetadata(t *testing.T) { - attributes := pcommon.NewMap() - attributes.PutStr("key3", "value3") - attributes.PutStr("key1", "value1") - attributes.PutStr("key2", "value2") - attributes.PutStr("additional_key2", "value2") - attributes.PutStr("additional_key3", "value3") - - regexes := []string{"^key[12]", "^key3"} - f, err := newFilter(regexes) - require.NoError(t, err) - - data := f.filterOut(attributes) - expected := fieldsFromMap(map[string]string{ - "additional_key2": "value2", - "additional_key3": "value3", - }) - // Use string() because object comparison has not been reliable - assert.Equal(t, expected.string(), data.string()) -} diff --git a/pkg/exporter/sumologicexporter/go.mod b/pkg/exporter/sumologicexporter/go.mod deleted file mode 100644 index 718e137f99..0000000000 --- a/pkg/exporter/sumologicexporter/go.mod +++ /dev/null @@ -1,86 +0,0 @@ -module github.com/SumoLogic/sumologic-otel-collector/pkg/exporter/sumologicexporter - -go 1.22.0 - -toolchain go1.22.8 - -require ( - github.com/klauspost/compress v1.17.11 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension v0.114.0 - github.com/stretchr/testify v1.9.0 - go.opencensus.io v0.24.0 - go.opentelemetry.io/collector/component v0.114.0 - go.opentelemetry.io/collector/component/componenttest v0.114.0 - go.opentelemetry.io/collector/config/configauth v0.114.0 - go.opentelemetry.io/collector/config/configcompression v1.20.0 - go.opentelemetry.io/collector/config/confighttp v0.114.0 - go.opentelemetry.io/collector/config/configretry v1.20.0 - go.opentelemetry.io/collector/config/configtls v1.20.0 - go.opentelemetry.io/collector/consumer/consumererror v0.114.0 - go.opentelemetry.io/collector/exporter v0.114.0 - go.opentelemetry.io/collector/exporter/exportertest v0.114.0 - go.opentelemetry.io/collector/pdata v1.20.0 - go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 -) - -require ( - github.com/Showmax/go-fqdn v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ebitengine/purego v0.8.1 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - 
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/go-ps v1.0.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/rs/cors v1.11.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.10 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/client v1.20.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.20.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.114.0 // indirect - go.opentelemetry.io/collector/config/internal v0.114.0 // indirect - go.opentelemetry.io/collector/consumer v0.114.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.114.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.114.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.114.0 // indirect - go.opentelemetry.io/collector/extension v0.114.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.114.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.114.0 // indirect - go.opentelemetry.io/collector/featuregate v1.20.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.114.0 // indirect - go.opentelemetry.io/collector/pipeline v0.114.0 // indirect - go.opentelemetry.io/collector/receiver v0.114.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.114.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.114.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/pkg/exporter/sumologicexporter/go.sum b/pkg/exporter/sumologicexporter/go.sum deleted file mode 100644 index ade5faa4f5..0000000000 --- a/pkg/exporter/sumologicexporter/go.sum +++ /dev/null @@ -1,280 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= -github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= -github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension v0.114.0 h1:0Bv7jA+ylHWvxCpaxeTQYMu/qruO6meFqmYcaxxcg18= 
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sumologicextension v0.114.0/go.mod h1:9GjSKXmqpeKl2q3GbueDwDowXmhVX6nZItEvFX9T3/I= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= -github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM= -github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/client v1.20.0 h1:o60wPcj5nLtaRenF+1E5p4QXFS3TDL6vHlw+GOon3rg= -go.opentelemetry.io/collector/client v1.20.0/go.mod h1:6aqkszco9FaLWCxyJEVam6PP7cUa8mPRIXeS5eZGj0U= -go.opentelemetry.io/collector/component v0.114.0 h1:SVGbm5LvHGSTEDv7p92oPuBgK5tuiWR82I9+LL4TtBE= -go.opentelemetry.io/collector/component v0.114.0/go.mod h1:MLxtjZ6UVHjDxSdhGLuJfHBHvfl1iT/Y7IaQPD24Eww= -go.opentelemetry.io/collector/component/componenttest v0.114.0 
h1:GM4FTTlfeXoVm6sZYBHImwlRN8ayh2oAfUhvaFj7Zo8= -go.opentelemetry.io/collector/component/componenttest v0.114.0/go.mod h1:ZZEJMtbJtoVC/3/9R1HzERq+cYQRxuMFQrPCpfZ4Xos= -go.opentelemetry.io/collector/config/configauth v0.114.0 h1:R2sJ6xpsLYGH0yU0vCxotzBYDKR/Hrjv0A7y9lwMyiw= -go.opentelemetry.io/collector/config/configauth v0.114.0/go.mod h1:3Z24KcCpG+WYCeQYfs/cNp5cP2BDeOqLCtOEgs/rPqM= -go.opentelemetry.io/collector/config/configcompression v1.20.0 h1:H/mvz7J/5z+O74YsO0t2tk+REnO2tzLM8TgIQ4AZ5w0= -go.opentelemetry.io/collector/config/configcompression v1.20.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU= -go.opentelemetry.io/collector/config/confighttp v0.114.0 h1:DjGsBvVm+mGK3IpJBaXianWhwcxEC1fF33cpuC1LY/I= -go.opentelemetry.io/collector/config/confighttp v0.114.0/go.mod h1:nrlNLxOZ+4JQaV9j0TiqQV7LOHhrRivPrT8nQRHED3Q= -go.opentelemetry.io/collector/config/configopaque v1.20.0 h1:2I48zKiyyyYqjm7y0B9OLp24ku2ZSX3nCHG0r5FdWOQ= -go.opentelemetry.io/collector/config/configopaque v1.20.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4= -go.opentelemetry.io/collector/config/configretry v1.20.0 h1:z679mrMlW2a6tOOYPGdrS/QfALxdzWLQUOpH8Uu+D5Y= -go.opentelemetry.io/collector/config/configretry v1.20.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0= -go.opentelemetry.io/collector/config/configtelemetry v0.114.0 h1:kjLeyrumge6wsX6ZIkicdNOlBXaEyW2PI2ZdVXz/rzY= -go.opentelemetry.io/collector/config/configtelemetry v0.114.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc= -go.opentelemetry.io/collector/config/configtls v1.20.0 h1:hNlJdwfyY5Qe54RLJ41lfLqKTn9ypkR7sk7JNCcSe2U= -go.opentelemetry.io/collector/config/configtls v1.20.0/go.mod h1:sav/txSHguadTYlSSK+BJO2ljJeYEtRoBahgzWAguYg= -go.opentelemetry.io/collector/config/internal v0.114.0 h1:uWSDWTJb8T6xRjKD9/XmEARakXnxgYVYKUeId78hErc= -go.opentelemetry.io/collector/config/internal v0.114.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc= -go.opentelemetry.io/collector/consumer v0.114.0 h1:1zVaHvfIZowGwZRitRBRo3i+RP2StlU+GClYiofSw0Q= -go.opentelemetry.io/collector/consumer v0.114.0/go.mod h1:d+Mrzt9hsH1ub3zmwSlnQVPLeTYir4Mgo7CrWfnncN4= -go.opentelemetry.io/collector/consumer/consumererror v0.114.0 h1:r2YiELfWerb40FHD23V04gNjIkLUcjEKGxI4Vtm2iO4= -go.opentelemetry.io/collector/consumer/consumererror v0.114.0/go.mod h1:MzIrLQ5jptO2egypolhlAbZsWZr29WC4FhSxQjnxcvg= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.114.0 h1:5pXYy3E6UK5Huu3aQbsYL8B6E6MyWx4fvXXDn+oXZaA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.114.0/go.mod h1:PMq3f54KcJQO4v1tue0QxQScu7REFVADlXxXSAYMiN0= -go.opentelemetry.io/collector/consumer/consumertest v0.114.0 h1:isaTwJK5DOy8Bs7GuLq23ejfgj8gLIo5dOUvkRnLF4g= -go.opentelemetry.io/collector/consumer/consumertest v0.114.0/go.mod h1:GNeLPkfRPdh06n/Rv1UKa/cAtCKjN0a7ADyHjIj4HFE= -go.opentelemetry.io/collector/exporter v0.114.0 h1:5/0BBpXuCJQSQ5SQf31g7j6T4XEKkyx9mZMcA2rS5e8= -go.opentelemetry.io/collector/exporter v0.114.0/go.mod h1:atpd0wWXgh5LAZ0REU/d/Ti/q50HDfnlBIjMjJQlKFg= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.114.0 h1:/wmWOSBHcvtz3Pbv7+rWCqPPQuNvYaoidKKaOqZsLKs= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.114.0/go.mod h1:epRYTkyJZTSQZBJflMGHUyUo2EdLPhsaZEyo5Qm848A= -go.opentelemetry.io/collector/exporter/exportertest v0.114.0 h1:vo0idBJT+QACSM1KpjVLm9VeiXVwO7y4UnMpGxN6EyM= -go.opentelemetry.io/collector/exporter/exportertest v0.114.0/go.mod h1:420ssFrhaphneybstbMeSIiqSRoaBARPgO71O17foaM= -go.opentelemetry.io/collector/extension v0.114.0 
h1:9Qb92y8hD2WDC5aMDoj4JNQN+/5BQYJWPUPzLXX+iGw= -go.opentelemetry.io/collector/extension v0.114.0/go.mod h1:Yk2/1ptVgfTr12t+22v93nYJpioP14pURv2YercSzU0= -go.opentelemetry.io/collector/extension/auth v0.114.0 h1:1K2qh4yvG8kKR/sTAobI/rw5VxzPZoKcl3FmC195vvo= -go.opentelemetry.io/collector/extension/auth v0.114.0/go.mod h1:IjtsG+jUVJB0utKF8dAK8pLutRun3aEgASshImzsw/U= -go.opentelemetry.io/collector/extension/experimental/storage v0.114.0 h1:hLyX9UvmY0t6iBnk3CqvyNck2U0QjPACekj7pDRx2hA= -go.opentelemetry.io/collector/extension/experimental/storage v0.114.0/go.mod h1:WqYRQVJjJLE1rm+y/ks1wPdPRGWePEvE1VO07xm2J2k= -go.opentelemetry.io/collector/extension/extensiontest v0.114.0 h1:ibXDms1qrswlvlR6b3d2BeyI8sXUXoFV11yOi9Sop8o= -go.opentelemetry.io/collector/extension/extensiontest v0.114.0/go.mod h1:/bOYmqu5yTDfI1bJZUxFqm8ZtmcodpquebiSxiQxtDY= -go.opentelemetry.io/collector/featuregate v1.20.0 h1:Mi7nMy/q52eruI+6jWnMKUOeM55XvwoPnGcdB1++O8c= -go.opentelemetry.io/collector/featuregate v1.20.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs= -go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM= -go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= -go.opentelemetry.io/collector/pdata/pprofile v0.114.0 h1:pUNfTzsI/JUTiE+DScDM4lsrPoxnVNLI2fbTxR/oapo= -go.opentelemetry.io/collector/pdata/pprofile v0.114.0/go.mod h1:4aNcj6WM1n1uXyFSXlhVs4ibrERgNYsTbzcYI2zGhxA= -go.opentelemetry.io/collector/pdata/testdata v0.114.0 h1:+AzszWSL1i4K6meQ8rU0JDDW55SYCXa6FVqfDixhhTo= -go.opentelemetry.io/collector/pdata/testdata v0.114.0/go.mod h1:bv8XFdCTZxG2MQB5l9dKxSxf5zBrcodwO6JOy1+AxXM= -go.opentelemetry.io/collector/pipeline v0.114.0 h1:v3YOhc5z0tD6QbO5n/pnftpIeroihM2ks9Z2yKPCcwY= -go.opentelemetry.io/collector/pipeline v0.114.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg= -go.opentelemetry.io/collector/receiver v0.114.0 h1:90SAnXAjNq7/k52/pFmmb06Cf1YauoPYtbio4aOXafY= -go.opentelemetry.io/collector/receiver v0.114.0/go.mod h1:KUGT0/D953LXbGH/D3lLPU8yrU3HfWnUqpt4W4hSOnE= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.114.0 h1:ibhEfGpvNB3yrtpl2jYFabrunMk1hurxvMYpM0b1Ck4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.114.0/go.mod h1:UZyRfaasw+NLvN10AN8IQnmj5tQ3BOUH1uP2ctpO9f0= -go.opentelemetry.io/collector/receiver/receivertest v0.114.0 h1:D+Kh9t2n4asTnM+TiSxbrKlUemLZandWntj17BJWWb0= -go.opentelemetry.io/collector/receiver/receivertest v0.114.0/go.mod h1:mNSHQ13vFmqD+VAcRzLjStFBejbcWUn2Mp0pAd7Op+U= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 
h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 h1:FqrVOBQxQ8r/UwwXibI0KMolVhvFiGobSfdE33deHJM= -golang.org/x/exp v0.0.0-20230711023510-fffb14384f22/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/exporter/sumologicexporter/internal/observability/observability.go b/pkg/exporter/sumologicexporter/internal/observability/observability.go deleted file mode 100644 index 6076bfe713..0000000000 --- a/pkg/exporter/sumologicexporter/internal/observability/observability.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 Sumo Logic, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package observability - -import ( - "context" - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -func init() { - err := view.Register( - viewRequestsSent, - viewRequestsDuration, - viewRequestsBytes, - viewRequestsRecords, - ) - if err != nil { - fmt.Printf("Failed to register sumologic exporter's views: %v\n", err) - } -} - -var ( - mRequestsSent = stats.Int64("exporter/requests/sent", "Number of requests", "1") - mRequestsDuration = stats.Int64("exporter/requests/duration", "Duration of HTTP requests (in milliseconds)", "0") - mRequestsBytes = stats.Int64("exporter/requests/bytes", "Total size of requests (in bytes)", "0") - mRequestsRecords = stats.Int64("exporter/requests/records", "Total size of requests (in number of records)", "0") - - statusKey, _ = tag.NewKey("status_code") // nolint:errcheck - endpointKey, _ = tag.NewKey("endpoint") // nolint:errcheck - pipelineKey, _ = tag.NewKey("pipeline") // nolint:errcheck - exporterKey, _ = tag.NewKey("exporter") // nolint:errcheck -) - -var viewRequestsSent = &view.View{ - Name: mRequestsSent.Name(), - Description: mRequestsSent.Description(), - Measure: mRequestsSent, - TagKeys: []tag.Key{statusKey, endpointKey, pipelineKey, exporterKey}, - Aggregation: view.Count(), -} - -var viewRequestsDuration = &view.View{ - Name: mRequestsDuration.Name(), - Description: mRequestsDuration.Description(), - Measure: mRequestsDuration, - TagKeys: []tag.Key{statusKey, endpointKey, pipelineKey, exporterKey}, - Aggregation: view.Sum(), -} - -var viewRequestsBytes = &view.View{ - Name: mRequestsBytes.Name(), - Description: mRequestsBytes.Description(), - Measure: mRequestsBytes, - TagKeys: []tag.Key{statusKey, endpointKey, pipelineKey, exporterKey}, - Aggregation: view.Sum(), -} - -var viewRequestsRecords = &view.View{ - Name: mRequestsRecords.Name(), - Description: mRequestsRecords.Description(), - Measure: mRequestsRecords, - TagKeys: []tag.Key{statusKey, endpointKey, pipelineKey, exporterKey}, - Aggregation: view.Sum(), -} - -// RecordRequestsSent increments the metric that records sent requests -func RecordRequestsSent(statusCode int, endpoint string, pipeline string, exporter string) error { - return stats.RecordWithTags( - context.Background(), - []tag.Mutator{ - tag.Insert(statusKey, fmt.Sprint(statusCode)), - tag.Insert(endpointKey, endpoint), - tag.Insert(pipelineKey, pipeline), - tag.Insert(exporterKey, exporter), - }, - mRequestsSent.M(int64(1)), - ) -} - -// RecordRequestsDuration update metric which records request duration -func RecordRequestsDuration(duration time.Duration, statusCode int, endpoint string, pipeline string, exporter string) error { - return stats.RecordWithTags( - context.Background(), - []tag.Mutator{ - tag.Insert(statusKey, fmt.Sprint(statusCode)), - tag.Insert(endpointKey, endpoint), - tag.Insert(pipelineKey, pipeline), - tag.Insert(exporterKey, exporter), - }, - mRequestsDuration.M(duration.Milliseconds()), - ) -} - -// RecordRequestsBytes update metric which records number of send bytes -func RecordRequestsBytes(bytes int64, statusCode int, endpoint string, pipeline string, exporter string) error { - return stats.RecordWithTags( - context.Background(), - []tag.Mutator{ - tag.Insert(statusKey, fmt.Sprint(statusCode)), - tag.Insert(endpointKey, endpoint), - tag.Insert(pipelineKey, pipeline), - tag.Insert(exporterKey, exporter), - }, - mRequestsBytes.M(bytes), - ) -} - -// RecordRequestsRecords update metric which records number of sent records -func 
RecordRequestsRecords(records int64, statusCode int, endpoint string, pipeline string, exporter string) error {
-	return stats.RecordWithTags(
-		context.Background(),
-		[]tag.Mutator{
-			tag.Insert(statusKey, fmt.Sprint(statusCode)),
-			tag.Insert(endpointKey, endpoint),
-			tag.Insert(pipelineKey, pipeline),
-			tag.Insert(exporterKey, exporter),
-		},
-		mRequestsRecords.M(records),
-	)
-}
diff --git a/pkg/exporter/sumologicexporter/internal/observability/observability_test.go b/pkg/exporter/sumologicexporter/internal/observability/observability_test.go
deleted file mode 100644
index f3e6849b83..0000000000
--- a/pkg/exporter/sumologicexporter/internal/observability/observability_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2020 OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package observability
-
-import (
-	"context"
-	"sort"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"go.opencensus.io/metric/metricdata"
-	"go.opencensus.io/metric/metricexport"
-)
-
-type exporter struct {
-	pipe chan *metricdata.Metric
-}
-
-func newExporter() *exporter {
-	return &exporter{
-		make(chan *metricdata.Metric),
-	}
-}
-
-// ReturnAfter runs a goroutine which is going to receive `after` metrics.
-// The goroutine writes the collected metrics to the returned chan.
-func (e *exporter) ReturnAfter(after int) chan []*metricdata.Metric {
-	ch := make(chan []*metricdata.Metric)
-	go func() {
-		received := []*metricdata.Metric{}
-		for m := range e.pipe {
-			received = append(received, m)
-			if len(received) >= after {
-				break
-			}
-		}
-		ch <- received
-	}()
-	return ch
-}
-
-// ExportMetrics writes received metrics to the data channel.
-func (e *exporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error {
-	for _, m := range data {
-		e.pipe <- m
-	}
-	return nil
-}
-
-// metricReader creates a metrics reader and forwards metrics from it to chData.
-// It sends an empty struct to the fail chan afterwards if the metrics never arrive.
-func metricReader(chData chan []*metricdata.Metric, fail chan struct{}, count int) {
-
-	// Add a manual retry mechanism in case there's a hiccup reading the
-	// metrics from producers in ReadAndExport(): we can wait for the metrics
-	// to come instead of failing because they didn't come right away.
-	for i := 0; i < 10; i++ {
-		e := newExporter()
-		ch := e.ReturnAfter(count)
-		go metricexport.NewReader().ReadAndExport(e)
-
-		select {
-		case <-time.After(500 * time.Millisecond):
-
-		case data := <-ch:
-			chData <- data
-			return
-		}
-	}
-
-	fail <- struct{}{}
-}
-
-// NOTE:
-// This test can only be run with -count 1 because of the static
-// metricproducer.GlobalManager() used in metricexport.NewReader().
-func TestMetrics(t *testing.T) { - const ( - statusCode = 200 - endpoint = "some/uri" - pipeline = "metrics" - exporter = "sumologic/my-name" - bytesFunc = "bytes" - recordsFunc = "records" - durationFunc = "duration" - sentFunc = "sent" - ) - type testCase struct { - name string - bytes int64 - records int64 - recordFunc string - duration time.Duration - } - tests := []testCase{ - { - name: "exporter/requests/sent", - recordFunc: sentFunc, - }, - { - name: "exporter/requests/duration", - recordFunc: durationFunc, - duration: time.Millisecond, - }, - { - name: "exporter/requests/bytes", - recordFunc: bytesFunc, - bytes: 1, - }, - { - name: "exporter/requests/records", - recordFunc: recordsFunc, - records: 1, - }, - } - - var ( - fail = make(chan struct{}) - chData = make(chan []*metricdata.Metric) - ) - - go metricReader(chData, fail, len(tests)) - - for _, tt := range tests { - switch tt.recordFunc { - case sentFunc: - require.NoError(t, RecordRequestsSent(statusCode, endpoint, pipeline, exporter)) - case durationFunc: - require.NoError(t, RecordRequestsDuration(tt.duration, statusCode, endpoint, pipeline, exporter)) - case bytesFunc: - require.NoError(t, RecordRequestsBytes(tt.bytes, statusCode, endpoint, pipeline, exporter)) - case recordsFunc: - require.NoError(t, RecordRequestsRecords(tt.records, statusCode, endpoint, pipeline, exporter)) - } - } - - var data []*metricdata.Metric - select { - case <-fail: - t.Fatalf("timedout waiting for metrics to arrive") - case data = <-chData: - } - - sort.Slice(tests, func(i, j int) bool { - return tests[i].name < tests[j].name - }) - - sort.Slice(data, func(i, j int) bool { - return data[i].Descriptor.Name < data[j].Descriptor.Name - }) - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Len(t, data, len(tests)) - d := data[i] - assert.Equal(t, tt.name, d.Descriptor.Name, "Expected %v at index %v, but got %v.", tt.name, i, d.Descriptor.Name) - require.Len(t, d.TimeSeries, 1) - require.Len(t, d.TimeSeries[0].Points, 1) - assert.Equal(t, d.TimeSeries[0].Points[0].Value, int64(1)) - - require.Len(t, d.TimeSeries[0].LabelValues, 4) - - require.True(t, d.TimeSeries[0].LabelValues[0].Present) - require.True(t, d.TimeSeries[0].LabelValues[1].Present) - require.True(t, d.TimeSeries[0].LabelValues[2].Present) - require.True(t, d.TimeSeries[0].LabelValues[3].Present) - - assert.Equal(t, d.TimeSeries[0].LabelValues[0].Value, "some/uri") - assert.Equal(t, d.TimeSeries[0].LabelValues[1].Value, "sumologic/my-name") - assert.Equal(t, d.TimeSeries[0].LabelValues[2].Value, "metrics") - assert.Equal(t, d.TimeSeries[0].LabelValues[3].Value, "200") - }) - } -} diff --git a/pkg/exporter/sumologicexporter/otlp.go b/pkg/exporter/sumologicexporter/otlp.go deleted file mode 100644 index 19d68b6ca6..0000000000 --- a/pkg/exporter/sumologicexporter/otlp.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
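For readers unfamiliar with the OpenCensus pattern the removed observability package is built on, here is a minimal, self-contained sketch of view registration and tagged recording. The measure and tag names below are illustrative placeholders, not identifiers from the deleted code:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	// A counter-style measure, analogous to mRequestsSent above.
	mSent     = stats.Int64("example/requests/sent", "Number of requests", "1")
	statusKey = tag.MustNewKey("status_code")
)

func main() {
	// A view aggregates recorded measurements; Count() keeps one running
	// total per distinct tag combination.
	if err := view.Register(&view.View{
		Name:        mSent.Name(),
		Description: mSent.Description(),
		Measure:     mSent,
		TagKeys:     []tag.Key{statusKey},
		Aggregation: view.Count(),
	}); err != nil {
		log.Fatal(err)
	}

	// Each recorded point carries the status code as a tag, the same
	// mechanism RecordRequestsSent uses for its four tag keys.
	if err := stats.RecordWithTags(context.Background(),
		[]tag.Mutator{tag.Insert(statusKey, fmt.Sprint(200))},
		mSent.M(1),
	); err != nil {
		log.Fatal(err)
	}
}
```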
- -package sumologicexporter - -import ( - "math" - - "go.opentelemetry.io/collector/pdata/pmetric" -) - -// DecomposeHistograms decomposes any histograms present in the metric data into individual Sums and Gauges -// This is a noop if no Histograms are present, but otherwise makes a copy of the whole structure -// This exists because Sumo doesn't support OTLP histograms yet, and has the same semantics as the conversion to Prometheus format in prometheus_formatter.go -func DecomposeHistograms(md pmetric.Metrics) pmetric.Metrics { - // short circuit and do nothing if no Histograms are present - foundHistogram := false -outer: - for i := 0; i < md.ResourceMetrics().Len(); i++ { - resourceMetric := md.ResourceMetrics().At(i) - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { - scopeMetric := resourceMetric.ScopeMetrics().At(j) - for k := 0; k < scopeMetric.Metrics().Len(); k++ { - foundHistogram = scopeMetric.Metrics().At(k).Type() == pmetric.MetricTypeHistogram - if foundHistogram { - break outer - } - } - } - } - if !foundHistogram { - return md - } - - decomposed := pmetric.NewMetrics() - md.CopyTo(decomposed) - - for i := 0; i < decomposed.ResourceMetrics().Len(); i++ { - resourceMetric := decomposed.ResourceMetrics().At(i) - for j := 0; j < resourceMetric.ScopeMetrics().Len(); j++ { - metrics := resourceMetric.ScopeMetrics().At(j).Metrics() - for k := 0; k < metrics.Len(); k++ { - metric := metrics.At(k) - if metric.Type() == pmetric.MetricTypeHistogram { - decomposedHistogram := decomposeHistogram(metric) - decomposedHistogram.MoveAndAppendTo(metrics) - } - } - metrics.RemoveIf(func(m pmetric.Metric) bool { return m.Type() == pmetric.MetricTypeHistogram }) - } - } - - return decomposed -} - -// decomposeHistogram decomposes a single Histogram metric into individual metrics for count, bucket and sum -// non-Histograms give an empty slice as output -func decomposeHistogram(metric pmetric.Metric) pmetric.MetricSlice { - output := pmetric.NewMetricSlice() - if metric.Type() != pmetric.MetricTypeHistogram { - return output - } - - getHistogramSumMetric(metric).MoveTo(output.AppendEmpty()) - getHistogramCountMetric(metric).MoveTo(output.AppendEmpty()) - getHistogramBucketsMetric(metric).MoveTo(output.AppendEmpty()) - - return output -} - -func getHistogramBucketsMetric(metric pmetric.Metric) pmetric.Metric { - histogram := metric.Histogram() - - bucketsMetric := pmetric.NewMetric() - bucketsMetric.SetName(metric.Name() + "_bucket") - bucketsMetric.SetDescription(metric.Description()) - bucketsMetric.SetUnit(metric.Unit()) - bucketsMetric.SetEmptyGauge() - bucketsDatapoints := bucketsMetric.Gauge().DataPoints() - - for i := 0; i < histogram.DataPoints().Len(); i++ { - histogramDataPoint := histogram.DataPoints().At(i) - histogramBounds := histogramDataPoint.ExplicitBounds() - var cumulative uint64 = 0 - - for j := 0; j < histogramBounds.Len(); j++ { - bucketDataPoint := bucketsDatapoints.AppendEmpty() - bound := histogramBounds.At(j) - histogramDataPoint.Attributes().CopyTo(bucketDataPoint.Attributes()) - bucketDataPoint.Attributes().PutDouble(prometheusLeTag, bound) - bucketDataPoint.SetStartTimestamp(histogramDataPoint.StartTimestamp()) - bucketDataPoint.SetTimestamp(histogramDataPoint.Timestamp()) - cumulative += histogramDataPoint.BucketCounts().At(j) - bucketDataPoint.SetIntValue(int64(cumulative)) - } - - // need to add one more bucket at +Inf - bucketDataPoint := bucketsDatapoints.AppendEmpty() - histogramDataPoint.Attributes().CopyTo(bucketDataPoint.Attributes()) - 
bucketDataPoint.Attributes().PutDouble(prometheusLeTag, math.Inf(1)) - bucketDataPoint.SetStartTimestamp(histogramDataPoint.StartTimestamp()) - bucketDataPoint.SetTimestamp(histogramDataPoint.Timestamp()) - cumulative += histogramDataPoint.BucketCounts().At(histogramDataPoint.ExplicitBounds().Len()) - bucketDataPoint.SetIntValue(int64(cumulative)) - } - return bucketsMetric -} - -func getHistogramSumMetric(metric pmetric.Metric) pmetric.Metric { - histogram := metric.Histogram() - - sumMetric := pmetric.NewMetric() - sumMetric.SetName(metric.Name() + "_sum") - sumMetric.SetDescription(metric.Description()) - sumMetric.SetUnit(metric.Unit()) - sumMetric.SetEmptyGauge() - sumDataPoints := sumMetric.Gauge().DataPoints() - - for i := 0; i < histogram.DataPoints().Len(); i++ { - histogramDataPoint := histogram.DataPoints().At(i) - sumDataPoint := sumDataPoints.AppendEmpty() - histogramDataPoint.Attributes().CopyTo(sumDataPoint.Attributes()) - sumDataPoint.SetStartTimestamp(histogramDataPoint.StartTimestamp()) - sumDataPoint.SetTimestamp(histogramDataPoint.Timestamp()) - sumDataPoint.SetDoubleValue(histogramDataPoint.Sum()) - } - return sumMetric -} - -func getHistogramCountMetric(metric pmetric.Metric) pmetric.Metric { - histogram := metric.Histogram() - - countMetric := pmetric.NewMetric() - countMetric.SetName(metric.Name() + "_count") - countMetric.SetDescription(metric.Description()) - countMetric.SetUnit(metric.Unit()) - countMetric.SetEmptyGauge() - countDataPoints := countMetric.Gauge().DataPoints() - - for i := 0; i < histogram.DataPoints().Len(); i++ { - histogramDataPoint := histogram.DataPoints().At(i) - countDataPoint := countDataPoints.AppendEmpty() - histogramDataPoint.Attributes().CopyTo(countDataPoint.Attributes()) - countDataPoint.SetStartTimestamp(histogramDataPoint.StartTimestamp()) - countDataPoint.SetTimestamp(histogramDataPoint.Timestamp()) - countDataPoint.SetIntValue(int64(histogramDataPoint.Count())) - } - return countMetric -} diff --git a/pkg/exporter/sumologicexporter/otlp_test.go b/pkg/exporter/sumologicexporter/otlp_test.go deleted file mode 100644 index f28b3f0a7c..0000000000 --- a/pkg/exporter/sumologicexporter/otlp_test.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2023, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
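The cumulative-bucket arithmetic above is the crux of the decomposition: OTLP histograms store per-bucket counts, while the Prometheus-style output wants running totals plus a final +Inf bucket. Here is a standalone sketch of just that counting logic, reusing the bucket counts from the test data that follows; the pdata plumbing is omitted:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Per-bucket counts and explicit bounds, as stored on an OTLP
	// histogram data point.
	bucketCounts := []uint64{0, 12, 7, 5, 8, 13}
	bounds := []float64{0.1, 0.2, 0.5, 0.8, 1}

	// Prometheus buckets are cumulative: the series tagged le="bound"
	// counts every observation up to and including that bound.
	var cumulative uint64
	for i, bound := range bounds {
		cumulative += bucketCounts[i]
		fmt.Printf("le=%g -> %d\n", bound, cumulative)
	}

	// One extra bucket at +Inf holds the total observation count.
	cumulative += bucketCounts[len(bounds)]
	fmt.Printf("le=%g -> %d\n", math.Inf(1), cumulative)
	// Prints 0, 12, 19, 24, 32, 45 — matching the expected buckets
	// asserted in the histogram tests.
}
```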
- -package sumologicexporter - -import ( - "math" - "testing" - - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -const ( - timestamp1 = 1618124444.169 * 1e9 - timestamp2 = 1608424699.186 * 1e9 -) - -func TestHistogramDecomposeNoHistogram(t *testing.T) { - metric, resourceAttributes := exampleIntGaugeMetric() - metrics := pmetric.NewMetrics() - resourceAttributes.CopyTo(metrics.ResourceMetrics().AppendEmpty().Resource().Attributes()) - metric.MoveTo(metrics.ResourceMetrics().At(0).ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - decomposedMetrics := DecomposeHistograms(metrics) - assert.Equal(t, metrics, decomposedMetrics) -} - -func TestHistogramDecompose(t *testing.T) { - metrics := metricsWithHistogram() - decomposedMetrics := DecomposeHistograms(metrics) - assert.Equal(t, metrics.ResourceMetrics().At(0).Resource(), decomposedMetrics.ResourceMetrics().At(0).Resource()) - expectedMetrics := pmetric.NewMetrics() - expectedResourceMetric := expectedMetrics.ResourceMetrics().AppendEmpty() - metrics.ResourceMetrics().At(0).Resource().Attributes().CopyTo(expectedResourceMetric.Resource().Attributes()) - expectedMetricSlice := expectedResourceMetric.ScopeMetrics().AppendEmpty().Metrics() - addExpectedHistogramSum(expectedMetricSlice) - addExpectedHistogramCount(expectedMetricSlice) - addExpectedHistogramBuckets(expectedMetricSlice) - assert.Equal(t, expectedMetrics, decomposedMetrics) -} - -func metricsWithHistogram() pmetric.Metrics { - metrics := pmetric.NewMetrics() - resourceMetric := metrics.ResourceMetrics().AppendEmpty() - resourceMetric.Resource().Attributes().PutStr("key", "value") - scopeMetric := resourceMetric.ScopeMetrics().AppendEmpty() - metric := scopeMetric.Metrics().AppendEmpty() - - metric.SetEmptyHistogram() - metric.SetUnit("unit") - metric.SetName("histogram_metric_double_test") - metric.SetDescription("Test histogram metric") - - dp := metric.Histogram().DataPoints().AppendEmpty() - dp.Attributes().PutStr("container", "dolor") - - si := pcommon.NewUInt64Slice() - si.FromRaw([]uint64{0, 12, 7, 5, 8, 13}) - si.CopyTo(dp.BucketCounts()) - - sf := pcommon.NewFloat64Slice() - sf.FromRaw([]float64{0.1, 0.2, 0.5, 0.8, 1}) - sf.CopyTo(dp.ExplicitBounds()) - - dp.SetTimestamp(timestamp1) - dp.SetSum(45.6) - dp.SetCount(45) - - dp = metric.Histogram().DataPoints().AppendEmpty() - dp.Attributes().PutStr("container", "sit") - - si = pcommon.NewUInt64Slice() - si.FromRaw([]uint64{0, 10, 1, 1, 4, 6}) - si.CopyTo(dp.BucketCounts()) - - sf = pcommon.NewFloat64Slice() - sf.FromRaw([]float64{0.1, 0.2, 0.5, 0.8, 1}) - sf.CopyTo(dp.ExplicitBounds()) - - dp.SetTimestamp(timestamp2) - dp.SetSum(54.1) - dp.SetCount(22) - - return metrics -} - -func addExpectedHistogramSum(metrics pmetric.MetricSlice) { - metric := metrics.AppendEmpty() - metric.SetName("histogram_metric_double_test_sum") - metric.SetDescription("Test histogram metric") - metric.SetUnit("unit") - metric.SetEmptyGauge() - - dataPoint := metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "dolor") - dataPoint.SetTimestamp(timestamp1) - dataPoint.SetDoubleValue(45.6) - - dataPoint = metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "sit") - dataPoint.SetTimestamp(timestamp2) - dataPoint.SetDoubleValue(54.1) -} - -func addExpectedHistogramCount(metrics pmetric.MetricSlice) { - metric := metrics.AppendEmpty() - metric.SetName("histogram_metric_double_test_count") - 
metric.SetDescription("Test histogram metric") - metric.SetUnit("unit") - metric.SetEmptyGauge() - - dataPoint := metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "dolor") - dataPoint.SetTimestamp(timestamp1) - dataPoint.SetIntValue(45) - - dataPoint = metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "sit") - dataPoint.SetTimestamp(timestamp2) - dataPoint.SetIntValue(22) -} - -func addExpectedHistogramBuckets(metrics pmetric.MetricSlice) { - metric := metrics.AppendEmpty() - metric.SetName("histogram_metric_double_test_bucket") - metric.SetDescription("Test histogram metric") - metric.SetUnit("unit") - metric.SetEmptyGauge() - histogramBuckets := []struct { - float64 - int64 - }{ - {0.1, 0}, - {0.2, 12}, - {0.5, 19}, - {0.8, 24}, - {1, 32}, - {math.Inf(1), 45}, - } - for _, pair := range histogramBuckets { - bound, bucketCount := pair.float64, pair.int64 - dataPoint := metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "dolor") - dataPoint.Attributes().PutDouble(prometheusLeTag, bound) - dataPoint.SetTimestamp(timestamp1) - dataPoint.SetIntValue(bucketCount) - } - - histogramBuckets = []struct { - float64 - int64 - }{ - {0.1, 0}, - {0.2, 10}, - {0.5, 11}, - {0.8, 12}, - {1, 16}, - {math.Inf(1), 22}, - } - for _, pair := range histogramBuckets { - bound, bucketCount := pair.float64, pair.int64 - dataPoint := metric.Gauge().DataPoints().AppendEmpty() - dataPoint.Attributes().PutStr("container", "sit") - dataPoint.Attributes().PutDouble(prometheusLeTag, bound) - dataPoint.SetTimestamp(timestamp2) - dataPoint.SetIntValue(bucketCount) - } - -} diff --git a/pkg/exporter/sumologicexporter/prometheus_formatter.go b/pkg/exporter/sumologicexporter/prometheus_formatter.go deleted file mode 100644 index c5e4a860a8..0000000000 --- a/pkg/exporter/sumologicexporter/prometheus_formatter.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package sumologicexporter
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-	"time"
-
-	"go.opentelemetry.io/collector/pdata/pcommon"
-	"go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-type dataPoint interface {
-	Timestamp() pcommon.Timestamp
-	Attributes() pcommon.Map
-}
-
-type prometheusFormatter struct {
-	sanitNameRegex *regexp.Regexp
-	replacer       *strings.Replacer
-}
-
-type prometheusTags string
-
-const (
-	prometheusLeTag       string = "le"
-	prometheusQuantileTag string = "quantile"
-	prometheusInfValue    string = "+Inf"
-)
-
-func newPrometheusFormatter() (prometheusFormatter, error) {
-	sanitNameRegex, err := regexp.Compile(`[^0-9a-zA-Z\./_:\-]`)
-	if err != nil {
-		return prometheusFormatter{}, err
-	}
-
-	return prometheusFormatter{
-		sanitNameRegex: sanitNameRegex,
-		// `\`, `"` and `\n` should be escaped, everything else should be left as-is
-		// see: https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#line-format
-		replacer: strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", `\n`),
-	}, nil
-}
-
-// tags2String returns all attributes and labels as a sanitized prometheus labels string
-func (f *prometheusFormatter) tags2String(attr pcommon.Map, labels pcommon.Map) prometheusTags {
-	attrsPlusLabelsLen := attr.Len() + labels.Len()
-	if attrsPlusLabelsLen == 0 {
-		return ""
-	}
-
-	mergedAttributes := pcommon.NewMap()
-	mergedAttributes.EnsureCapacity(attrsPlusLabelsLen)
-
-	attr.CopyTo(mergedAttributes)
-	labels.Range(func(k string, v pcommon.Value) bool {
-		mergedAttributes.PutStr(k, v.AsString())
-		return true
-	})
-	length := mergedAttributes.Len()
-
-	returnValue := make([]string, 0, length)
-	mergedAttributes.Range(func(k string, v pcommon.Value) bool {
-		key := f.sanitizeKeyBytes([]byte(k))
-		value := f.sanitizeValue(v.AsString())
-
-		returnValue = append(
-			returnValue,
-			formatKeyValuePair(key, value),
-		)
-		return true
-	})
-
-	return prometheusTags(stringsJoinAndSurround(returnValue, ",", "{", "}"))
-}
-
-func formatKeyValuePair(key []byte, value string) string {
-	const (
-		quoteSign = `"`
-		equalSign = `=`
-	)
-
-	// Use strings.Builder and not fmt.Sprintf as it uses significantly fewer
-	// allocations.
-	sb := strings.Builder{}
-	// We preallocate space for key, value, equal sign and quotes.
-	sb.Grow(len(key) + len(equalSign) + 2*len(quoteSign) + len(value))
-	sb.Write(key)
-	sb.WriteString(equalSign)
-	sb.WriteString(quoteSign)
-	sb.WriteString(value)
-	sb.WriteString(quoteSign)
-	return sb.String()
-}
-
-// stringsJoinAndSurround joins the strings in the s slice using the separator,
-// adds front to the front of the resulting string and back at the end.
-//
-// This has a benefit over using strings.Join() of using just one strings.Builder
-// instance and hence fewer allocations to produce the final string.
-func stringsJoinAndSurround(s []string, separator, front, back string) string {
-	switch len(s) {
-	case 0:
-		return ""
-	case 1:
-		var b strings.Builder
-		b.Grow(len(s[0]) + len(front) + len(back))
-		b.WriteString(front)
-		b.WriteString(s[0])
-		b.WriteString(back)
-		return b.String()
-	}
-
-	// Count the total length of the strings for the preallocation.
-	n := len(front) + len(s[0])
-	for i := 1; i < len(s); i++ {
-		n += len(separator) + len(s[i])
-	}
-	n += len(back)
-
-	var b strings.Builder
-	// We preallocate space for all the entries in the provided slice together with
-	// the separator as well as the surrounding characters.
-	b.Grow(n)
-	b.WriteString(front)
-	b.WriteString(s[0])
-	for _, s := range s[1:] {
-		b.WriteString(separator)
-		b.WriteString(s)
-	}
-	b.WriteString(back)
-	return b.String()
-}
-
-// sanitizeKeyBytes returns a sanitized key byte slice by replacing
-// all non-allowed chars with `_`
-func (f *prometheusFormatter) sanitizeKeyBytes(s []byte) []byte {
-	return f.sanitNameRegex.ReplaceAll(s, []byte{'_'})
-}
-
-// sanitizeValue returns a sanitized value string performing the following substitutions:
-// `\` -> `\\`
-// `"` -> `\"`
-// "\n" -> `\n`
-func (f *prometheusFormatter) sanitizeValue(s string) string {
-	return f.replacer.Replace(s)
-}
-
-// doubleLine builds a metric line based on the given arguments where value is float64
-func (f *prometheusFormatter) doubleLine(name string, attributes prometheusTags, value float64, timestamp pcommon.Timestamp) string {
-	return fmt.Sprintf(
-		"%s%s %g %d",
-		f.sanitizeKeyBytes([]byte(name)),
-		attributes,
-		value,
-		timestamp/pcommon.Timestamp(time.Millisecond),
-	)
-}
-
-// intLine builds a metric line based on the given arguments where value is int64
-func (f *prometheusFormatter) intLine(name string, attributes prometheusTags, value int64, timestamp pcommon.Timestamp) string {
-	return fmt.Sprintf(
-		"%s%s %d %d",
-		f.sanitizeKeyBytes([]byte(name)),
-		attributes,
-		value,
-		timestamp/pcommon.Timestamp(time.Millisecond),
-	)
-}
-
-// uintLine builds a metric line based on the given arguments where value is uint64
-func (f *prometheusFormatter) uintLine(name string, attributes prometheusTags, value uint64, timestamp pcommon.Timestamp) string {
-	return fmt.Sprintf(
-		"%s%s %d %d",
-		f.sanitizeKeyBytes([]byte(name)),
-		attributes,
-		value,
-		timestamp/pcommon.Timestamp(time.Millisecond),
-	)
-}
-
-// doubleValueLine returns a prometheus line with the given value
-func (f *prometheusFormatter) doubleValueLine(name string, value float64, dp dataPoint, attributes pcommon.Map) string {
-	return f.doubleLine(
-		name,
-		f.tags2String(attributes, dp.Attributes()),
-		value,
-		dp.Timestamp(),
-	)
-}
-
-// uintValueLine returns a prometheus line with the given value
-func (f *prometheusFormatter) uintValueLine(name string, value uint64, dp dataPoint, attributes pcommon.Map) string {
-	return f.uintLine(
-		name,
-		f.tags2String(attributes, dp.Attributes()),
-		value,
-		dp.Timestamp(),
-	)
-}
-
-// numberDataPointValueLine returns a prometheus line with the value from pmetric.NumberDataPoint
-func (f *prometheusFormatter) numberDataPointValueLine(name string, dp pmetric.NumberDataPoint, attributes pcommon.Map) string {
-	switch dp.ValueType() {
-	case pmetric.NumberDataPointValueTypeDouble:
-		return f.doubleValueLine(
-			name,
-			dp.DoubleValue(),
-			dp,
-			attributes,
-		)
-	case pmetric.NumberDataPointValueTypeInt:
-		return f.intLine(
-			name,
-			f.tags2String(attributes, dp.Attributes()),
-			dp.IntValue(),
-			dp.Timestamp(),
-		)
-	}
-	return ""
-}
-
-// sumMetric returns a _sum suffixed metric name
-func (f *prometheusFormatter) sumMetric(name string) string {
-	return fmt.Sprintf("%s_sum", name)
-}
-
-// countMetric returns a _count suffixed metric name
-func (f *prometheusFormatter) countMetric(name string) string {
-	return fmt.Sprintf("%s_count", name)
-}
-
-// bucketMetric returns a _bucket suffixed metric name
-func (f *prometheusFormatter) bucketMetric(name string) string {
-	return fmt.Sprintf("%s_bucket", name)
-}
-
-// mergeAttributes gets two pcommon.Maps and returns a new one which contains values from both of them
-func (f *prometheusFormatter) mergeAttributes(attributes pcommon.Map, additionalAttributes
pcommon.Map) pcommon.Map { - mergedAttributes := pcommon.NewMap() - mergedAttributes.EnsureCapacity(attributes.Len() + additionalAttributes.Len()) - - attributes.CopyTo(mergedAttributes) - additionalAttributes.Range(func(k string, v pcommon.Value) bool { - v.CopyTo(mergedAttributes.PutEmpty(k)) - return true - }) - return mergedAttributes -} - -// doubleGauge2Strings converts DoubleGauge record to a list of strings (one per dataPoint) -func (f *prometheusFormatter) gauge2Strings(metric pmetric.Metric, attributes pcommon.Map) []string { - dps := metric.Gauge().DataPoints() - lines := make([]string, 0, dps.Len()) - - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - line := f.numberDataPointValueLine( - metric.Name(), - dp, - attributes, - ) - lines = append(lines, line) - } - - return lines -} - -// doubleSum2Strings converts Sum record to a list of strings (one per dataPoint) -func (f *prometheusFormatter) sum2Strings(metric pmetric.Metric, attributes pcommon.Map) []string { - dps := metric.Sum().DataPoints() - lines := make([]string, 0, dps.Len()) - - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - line := f.numberDataPointValueLine( - metric.Name(), - dp, - attributes, - ) - lines = append(lines, line) - } - - return lines -} - -// summary2Strings converts Summary record to a list of strings -// n+2 where n is number of quantiles and 2 stands for sum and count metrics per each data point -func (f *prometheusFormatter) summary2Strings(metric pmetric.Metric, attributes pcommon.Map) []string { - dps := metric.Summary().DataPoints() - var lines []string - - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - qs := dp.QuantileValues() - additionalAttributes := pcommon.NewMap() - for i := 0; i < qs.Len(); i++ { - q := qs.At(i) - additionalAttributes.PutDouble(prometheusQuantileTag, q.Quantile()) - - line := f.doubleValueLine( - metric.Name(), - q.Value(), - dp, - f.mergeAttributes(attributes, additionalAttributes), - ) - lines = append(lines, line) - } - - line := f.doubleValueLine( - f.sumMetric(metric.Name()), - dp.Sum(), - dp, - attributes, - ) - lines = append(lines, line) - - line = f.uintValueLine( - f.countMetric(metric.Name()), - dp.Count(), - dp, - attributes, - ) - lines = append(lines, line) - } - return lines -} - -// histogram2Strings converts Histogram record to a list of strings, -// (n+1) where n is number of bounds plus two for sum and count per each data point -func (f *prometheusFormatter) histogram2Strings(metric pmetric.Metric, attributes pcommon.Map) []string { - dps := metric.Histogram().DataPoints() - var lines []string - - for i := 0; i < dps.Len(); i++ { - dp := dps.At(i) - - explicitBounds := dp.ExplicitBounds() - - var cumulative uint64 - additionalAttributes := pcommon.NewMap() - - for i := 0; i < explicitBounds.Len(); i++ { - bound := explicitBounds.At(i) - cumulative += dp.BucketCounts().At(i) - additionalAttributes.PutDouble(prometheusLeTag, bound) - - line := f.uintValueLine( - f.bucketMetric(metric.Name()), - cumulative, - dp, - f.mergeAttributes(attributes, additionalAttributes), - ) - lines = append(lines, line) - } - var line string - - // according to the spec, it's valid to have no buckets at all - if dp.BucketCounts().Len() > 0 { - cumulative += dp.BucketCounts().At(explicitBounds.Len()) - additionalAttributes.PutStr(prometheusLeTag, prometheusInfValue) - line = f.uintValueLine( - f.bucketMetric(metric.Name()), - cumulative, - dp, - f.mergeAttributes(attributes, additionalAttributes), - ) - lines = append(lines, line) - } - - if dp.HasSum() { - 
line = f.doubleValueLine( - f.sumMetric(metric.Name()), - dp.Sum(), - dp, - attributes, - ) - lines = append(lines, line) - } - - line = f.uintValueLine( - f.countMetric(metric.Name()), - dp.Count(), - dp, - attributes, - ) - lines = append(lines, line) - } - - return lines -} - -// metric2String returns stringified metricPair -func (f *prometheusFormatter) metric2String(metric pmetric.Metric, attributes pcommon.Map) string { - var lines []string - - switch metric.Type() { - case pmetric.MetricTypeGauge: - lines = f.gauge2Strings(metric, attributes) - case pmetric.MetricTypeSum: - lines = f.sum2Strings(metric, attributes) - case pmetric.MetricTypeSummary: - lines = f.summary2Strings(metric, attributes) - case pmetric.MetricTypeHistogram: - lines = f.histogram2Strings(metric, attributes) - } - return strings.Join(lines, "\n") -} diff --git a/pkg/exporter/sumologicexporter/prometheus_formatter_test.go b/pkg/exporter/sumologicexporter/prometheus_formatter_test.go deleted file mode 100644 index 86132467f7..0000000000 --- a/pkg/exporter/sumologicexporter/prometheus_formatter_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sumologicexporter - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" -) - -func TestSanitizeKey(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - key := "&^*123-abc-ABC!./?_:\n\r" - expected := "___123-abc-ABC_./__:__" - assert.EqualValues(t, expected, f.sanitizeKeyBytes([]byte(key))) -} - -func TestSanitizeValue(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - // `\`, `"` and `\n` should be escaped, everything else should be left as-is - // see: https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#line-format - value := `&^*123-abc-ABC!?./"\` + "\n\r" - expected := `&^*123-abc-ABC!?./\"\\\n` + "\r" - assert.Equal(t, expected, f.sanitizeValue(value)) -} - -func TestTags2StringNoLabels(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - _, attributes := exampleIntMetric() - attributes.Clear() - assert.Equal(t, prometheusTags(""), f.tags2String(attributes, pcommon.NewMap())) -} - -func TestTags2String(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - _, attributes := exampleIntMetric() - attributes.PutInt("int", 200) - - labels := pcommon.NewMap() - labels.PutInt("l_int", 200) - labels.PutStr("l_str", "two") - - assert.Equal( - t, - prometheusTags(`{test="test_value",test2="second_value",int="200",l_int="200",l_str="two"}`), - f.tags2String(attributes, labels), - ) -} - -func TestTags2StringNoAttributes(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - _, attributes := exampleIntMetric() - attributes.Clear() - assert.Equal(t, 
prometheusTags(""), f.tags2String(pcommon.NewMap(), pcommon.NewMap())) -} - -func TestPrometheusMetricDataTypeIntGauge(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleIntGaugeMetric() - - result := f.metric2String(metric, attributes) - expected := `gauge_metric_name{foo="bar",remote_name="156920",url="http://example_url"} 124 1608124661166 -gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, result) -} - -func TestPrometheusMetricDataTypeDoubleGauge(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleDoubleGaugeMetric() - - result := f.metric2String(metric, attributes) - expected := `gauge_metric_name_double_test{foo="bar",local_name="156720",endpoint="http://example_url"} 33.4 1608124661169 -gauge_metric_name_double_test{foo="bar",local_name="156155",endpoint="http://another_url"} 56.8 1608124662186` - assert.Equal(t, expected, result) -} - -func TestPrometheusMetricDataTypeIntSum(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleIntSumMetric() - - result := f.metric2String(metric, attributes) - expected := `sum_metric_int_test{foo="bar",name="156720",address="http://example_url"} 45 1608124444169 -sum_metric_int_test{foo="bar",name="156155",address="http://another_url"} 1238 1608124699186` - assert.Equal(t, expected, result) -} - -func TestPrometheusMetricDataTypeDoubleSum(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleDoubleSumMetric() - - result := f.metric2String(metric, attributes) - expected := `sum_metric_double_test{foo="bar",pod_name="lorem",namespace="default"} 45.6 1618124444169 -sum_metric_double_test{foo="bar",pod_name="opsum",namespace="kube-config"} 1238.1 1608424699186` - assert.Equal(t, expected, result) -} - -func TestPrometheusMetricDataTypeSummary(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleSummaryMetric() - - result := f.metric2String(metric, attributes) - expected := `summary_metric_double_test{foo="bar",quantile="0.6",pod_name="dolor",namespace="sumologic"} 0.7 1618124444169 -summary_metric_double_test{foo="bar",quantile="2.6",pod_name="dolor",namespace="sumologic"} 4 1618124444169 -summary_metric_double_test_sum{foo="bar",pod_name="dolor",namespace="sumologic"} 45.6 1618124444169 -summary_metric_double_test_count{foo="bar",pod_name="dolor",namespace="sumologic"} 3 1618124444169 -summary_metric_double_test_sum{foo="bar",pod_name="sit",namespace="main"} 1238.1 1608424699186 -summary_metric_double_test_count{foo="bar",pod_name="sit",namespace="main"} 7 1608424699186` - assert.Equal(t, expected, result) -} - -func TestPrometheusMetricDataTypeHistogram(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - metric, attributes := exampleHistogramMetric() - - result := f.metric2String(metric, attributes) - expected := `histogram_metric_double_test_bucket{bar="foo",le="0.1",container="dolor",branch="sumologic"} 0 1618124444169 -histogram_metric_double_test_bucket{bar="foo",le="0.2",container="dolor",branch="sumologic"} 12 1618124444169 -histogram_metric_double_test_bucket{bar="foo",le="0.5",container="dolor",branch="sumologic"} 19 1618124444169 -histogram_metric_double_test_bucket{bar="foo",le="0.8",container="dolor",branch="sumologic"} 24 1618124444169 
-histogram_metric_double_test_bucket{bar="foo",le="1",container="dolor",branch="sumologic"} 32 1618124444169 -histogram_metric_double_test_bucket{bar="foo",le="+Inf",container="dolor",branch="sumologic"} 45 1618124444169 -histogram_metric_double_test_sum{bar="foo",container="dolor",branch="sumologic"} 45.6 1618124444169 -histogram_metric_double_test_count{bar="foo",container="dolor",branch="sumologic"} 7 1618124444169 -histogram_metric_double_test_bucket{bar="foo",le="0.1",container="sit",branch="main"} 0 1608424699186 -histogram_metric_double_test_bucket{bar="foo",le="0.2",container="sit",branch="main"} 10 1608424699186 -histogram_metric_double_test_bucket{bar="foo",le="0.5",container="sit",branch="main"} 11 1608424699186 -histogram_metric_double_test_bucket{bar="foo",le="0.8",container="sit",branch="main"} 12 1608424699186 -histogram_metric_double_test_bucket{bar="foo",le="1",container="sit",branch="main"} 16 1608424699186 -histogram_metric_double_test_bucket{bar="foo",le="+Inf",container="sit",branch="main"} 22 1608424699186 -histogram_metric_double_test_sum{bar="foo",container="sit",branch="main"} 54.1 1608424699186 -histogram_metric_double_test_count{bar="foo",container="sit",branch="main"} 98 1608424699186` - assert.Equal(t, expected, result) -} - -func TestEmptyPrometheusMetrics(t *testing.T) { - type testCase struct { - name string - metricFunc func(fillData bool) (pmetric.Metric, pcommon.Map) - expected string - } - - tests := []testCase{ - { - name: "empty int gauge", - metricFunc: buildExampleIntGaugeMetric, - expected: "", - }, - { - name: "empty double gauge", - metricFunc: buildExampleDoubleGaugeMetric, - expected: "", - }, - { - name: "empty int sum", - metricFunc: buildExampleIntSumMetric, - expected: "", - }, - { - name: "empty double sum", - metricFunc: buildExampleDoubleSumMetric, - expected: "", - }, - { - name: "empty summary", - metricFunc: buildExampleSummaryMetric, - expected: "", - }, - { - name: "histogram with one datapoint, no sum or buckets", - metricFunc: buildExampleHistogramMetric, - expected: `histogram_metric_double_test_count{bar="foo"} 0 0`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - f, err := newPrometheusFormatter() - require.NoError(t, err) - - result := f.metric2String(tt.metricFunc(false)) - assert.Equal(t, tt.expected, result) - }) - } -} - -func Benchmark_PrometheusFormatter_Metric2String(b *testing.B) { - f, err := newPrometheusFormatter() - require.NoError(b, err) - - metric, attributes := buildExampleHistogramMetric(true) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = f.metric2String(metric, attributes) - } -} diff --git a/pkg/exporter/sumologicexporter/sender.go b/pkg/exporter/sumologicexporter/sender.go deleted file mode 100644 index b69eb0cbb1..0000000000 --- a/pkg/exporter/sumologicexporter/sender.go +++ /dev/null @@ -1,779 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
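The expected strings in the formatter tests above all share one shape: `name{key="value",...} value timestamp_ms`. Below is a simplified sketch of producing such a line, assuming only the escaping rules the formatter documents (`\` -> `\\`, `"` -> `\"`, newline -> `\n`); the real implementation additionally sanitizes names with a regex, merges resource and data point attributes, and preserves pdata map order rather than sorting keys:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
	"time"
)

var escaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`, "\n", `\n`)

// formatLine mirrors the line shape asserted in the tests above: a metric
// name, a {k="v",...} label set, the value, and a millisecond timestamp.
func formatLine(name string, labels map[string]string, value float64, ts time.Time) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic label order for the example

	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, fmt.Sprintf(`%s="%s"`, k, escaper.Replace(labels[k])))
	}
	return fmt.Sprintf("%s{%s} %g %d", name, strings.Join(pairs, ","), value, ts.UnixMilli())
}

func main() {
	fmt.Println(formatLine("gauge_metric_name",
		map[string]string{"foo": "bar"}, 124, time.UnixMilli(1608124661166)))
	// gauge_metric_name{foo="bar"} 124 1608124661166
}
```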
- -package sumologicexporter - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "reflect" - "strings" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/plog" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/pdata/ptrace" - "go.uber.org/zap" - - "github.com/SumoLogic/sumologic-otel-collector/pkg/exporter/sumologicexporter/internal/observability" -) - -var ( - tracesMarshaler = ptrace.ProtoMarshaler{} - metricsMarshaler = pmetric.ProtoMarshaler{} - logsMarshaler = plog.ProtoMarshaler{} -) - -// metricPair represents information required to send one metric to the Sumo Logic -type metricPair struct { - attributes pcommon.Map - metric pmetric.Metric -} - -// countingReader keeps number of records related to reader -type countingReader struct { - counter int64 - reader io.Reader -} - -// newCountingReader creates countingReader with given number of records -func newCountingReader(records int) *countingReader { - return &countingReader{ - counter: int64(records), - } -} - -// withBytes sets up reader to read from bytes data -func (c *countingReader) withBytes(data []byte) *countingReader { - c.reader = bytes.NewReader(data) - return c -} - -// withString sets up reader to read from string data -func (c *countingReader) withString(data string) *countingReader { - c.reader = strings.NewReader(data) - return c -} - -// bodyBuilder keeps information about number of records related to data it keeps -type bodyBuilder struct { - builder strings.Builder - counter int -} - -// newBodyBuilder returns empty bodyBuilder -func newBodyBuilder() bodyBuilder { - return bodyBuilder{} -} - -// Reset resets both counter and builder content -func (b *bodyBuilder) Reset() { - b.counter = 0 - b.builder.Reset() -} - -// addLine adds multiple lines to builder and increments counter -func (b *bodyBuilder) addLines(lines []string) { - if len(lines) == 0 { - return - } - - // add the first line separately to avoid a conditional in the loop - b.builder.WriteString(lines[0]) - - for _, line := range lines[1:] { - b.builder.WriteByte('\n') - b.builder.WriteString(line) // WriteString can't actually return an error - } - b.counter += len(lines) -} - -// addNewLine adds newline to builder -func (b *bodyBuilder) addNewLine() { - b.builder.WriteByte('\n') // WriteByte can't actually return an error -} - -// Len returns builder content length -func (b *bodyBuilder) Len() int { - return b.builder.Len() -} - -// toCountingReader converts bodyBuilder to countingReader -func (b *bodyBuilder) toCountingReader() *countingReader { - return newCountingReader(b.counter).withString(b.builder.String()) -} - -type sender struct { - logger *zap.Logger - config *Config - client *http.Client - prometheusFormatter prometheusFormatter - dataUrlMetrics string - dataUrlLogs string - dataUrlTraces string - stickySessionCookieFunc func() string - setStickySessionCookieFunc func(string) - id component.ID -} - -const ( - // maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries) - // maxBufferSize int = 1024 * 1024 - - headerContentType string = "Content-Type" - headerContentEncoding string = "Content-Encoding" - headerClient string = "X-Sumo-Client" - headerHost string = "X-Sumo-Host" - headerName string = "X-Sumo-Name" - headerCategory string = "X-Sumo-Category" - headerFields string = "X-Sumo-Fields" 
- - attributeKeySourceHost = "_sourceHost" - attributeKeySourceName = "_sourceName" - attributeKeySourceCategory = "_sourceCategory" - - contentTypeLogs string = "application/x-www-form-urlencoded" - contentTypePrometheus string = "application/vnd.sumologic.prometheus" - contentTypeOTLP string = "application/x-protobuf" - contentEncodingGzip string = "gzip" - contentEncodingDeflate string = "deflate" - stickySessionKey string = "AWSALB" -) - -func newSender( - logger *zap.Logger, - cfg *Config, - cl *http.Client, - pf prometheusFormatter, - metricsUrl string, - logsUrl string, - tracesUrl string, - stickySessionCookieFunc func() string, - setStickySessionCookieFunc func(string), - id component.ID, -) *sender { - return &sender{ - logger: logger, - config: cfg, - client: cl, - prometheusFormatter: pf, - dataUrlMetrics: metricsUrl, - dataUrlLogs: logsUrl, - dataUrlTraces: tracesUrl, - stickySessionCookieFunc: stickySessionCookieFunc, - setStickySessionCookieFunc: setStickySessionCookieFunc, - id: id, - } -} - -var errUnauthorized = errors.New("unauthorized") - -// send sends data to sumologic -func (s *sender) send(ctx context.Context, pipeline PipelineType, reader *countingReader, flds fields) error { - req, err := s.createRequest(ctx, pipeline, reader.reader) - if err != nil { - return err - } - - if err := s.addRequestHeaders(req, pipeline, flds); err != nil { - return err - } - - if s.config.StickySessionEnabled { - s.addStickySessionCookie(req) - } - - s.logger.Debug("Sending data", - zap.String("pipeline", string(pipeline)), - zap.Any("headers", req.Header), - ) - - start := time.Now() - resp, err := s.client.Do(req) - if err != nil { - s.recordMetrics(time.Since(start), reader.counter, req, nil, pipeline) - return err - } - defer resp.Body.Close() - - s.recordMetrics(time.Since(start), reader.counter, req, resp, pipeline) - - return s.handleReceiverResponse(resp) -} - -func (s *sender) handleReceiverResponse(resp *http.Response) error { - if s.config.StickySessionEnabled { - s.updateStickySessionCookie(resp) - } - - // API responds with a 200 or 204 with ConentLength set to 0 when all data - // has been successfully ingested. - if resp.ContentLength == 0 && (resp.StatusCode == 200 || resp.StatusCode == 204) { - return nil - } - - type ReceiverResponseCore struct { - Status int `json:"status,omitempty"` - ID string `json:"id,omitempty"` - Code string `json:"code,omitempty"` - Message string `json:"message,omitempty"` - } - - // API responds with a 200 or 204 with a JSON body describing what issues - // were encountered when processing the sent data. 
- switch resp.StatusCode { - case 200, 204: - if resp.ContentLength < 0 { - s.logger.Warn("Unknown length of server response") - return nil - } - - var rResponse ReceiverResponseCore - var ( - b = bytes.NewBuffer(make([]byte, 0, resp.ContentLength)) - tr = io.TeeReader(resp.Body, b) - ) - - if err := json.NewDecoder(tr).Decode(&rResponse); err != nil { - s.logger.Warn("Error decoding receiver response", zap.ByteString("body", b.Bytes())) - return nil - } - - l := s.logger.With(zap.String("status", resp.Status)) - if len(rResponse.ID) > 0 { - l = l.With(zap.String("id", rResponse.ID)) - } - if len(rResponse.Code) > 0 { - l = l.With(zap.String("code", rResponse.Code)) - } - if len(rResponse.Message) > 0 { - l = l.With(zap.String("message", rResponse.Message)) - } - l.Warn("There was an issue sending data") - return nil - - case 401: - return errUnauthorized - - default: - type ReceiverErrorResponse struct { - ReceiverResponseCore - Errors []struct { - Code string `json:"code"` - Message string `json:"message"` - } `json:"errors,omitempty"` - } - - var rResponse ReceiverErrorResponse - if resp.ContentLength > 0 { - var ( - b = bytes.NewBuffer(make([]byte, 0, resp.ContentLength)) - tr = io.TeeReader(resp.Body, b) - ) - - if err := json.NewDecoder(tr).Decode(&rResponse); err != nil { - return fmt.Errorf("failed to decode API response (status: %s): %s", - resp.Status, b.String(), - ) - } - } - - errMsgs := []string{ - fmt.Sprintf("status: %s", resp.Status), - } - - if len(rResponse.ID) > 0 { - errMsgs = append(errMsgs, fmt.Sprintf("id: %s", rResponse.ID)) - } - if len(rResponse.Code) > 0 { - errMsgs = append(errMsgs, fmt.Sprintf("code: %s", rResponse.Code)) - } - if len(rResponse.Message) > 0 { - errMsgs = append(errMsgs, fmt.Sprintf("message: %s", rResponse.Message)) - } - if len(rResponse.Errors) > 0 { - errMsgs = append(errMsgs, fmt.Sprintf("errors: %+v", rResponse.Errors)) - } - - err := fmt.Errorf("failed sending data: %s", strings.Join(errMsgs, ", ")) - - if resp.StatusCode == http.StatusBadRequest { - // Report the failure as permanent if the server thinks the request is malformed. - return consumererror.NewPermanent(err) - } - - return err - } -} - -func (s *sender) createRequest(ctx context.Context, pipeline PipelineType, data io.Reader) (*http.Request, error) { - var url string - - switch pipeline { - case MetricsPipeline: - url = s.dataUrlMetrics - case LogsPipeline: - url = s.dataUrlLogs - case TracesPipeline: - url = s.dataUrlTraces - default: - return nil, fmt.Errorf("unknown pipeline type: %s", pipeline) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, data) - if err != nil { - return req, err - } - - return req, err -} - -// logToText converts LogRecord to a plain text line, returns it and error eventually -func (s *sender) logToText(record plog.LogRecord) string { - return record.Body().AsString() -} - -// logToJSON converts LogRecord to a json line, returns it and error eventually -func (s *sender) logToJSON(record plog.LogRecord) (string, error) { - recordCopy := plog.NewLogRecord() - record.CopyTo(recordCopy) - - // Only append the body when it's not empty to prevent sending 'null' log. 
- if body := recordCopy.Body(); !isEmptyAttributeValue(body) { - body.CopyTo(recordCopy.Attributes().PutEmpty(DefaultLogKey)) - } - - nextLine := new(bytes.Buffer) - enc := json.NewEncoder(nextLine) - enc.SetEscapeHTML(false) - err := enc.Encode(recordCopy.Attributes().AsRaw()) - - if err != nil { - return "", err - } - - return strings.TrimSuffix(nextLine.String(), "\n"), nil -} - -func isEmptyAttributeValue(att pcommon.Value) bool { - switch att.Type() { - case pcommon.ValueTypeEmpty: - return true - case pcommon.ValueTypeStr: - return len(att.Str()) == 0 - case pcommon.ValueTypeSlice: - return att.Slice().Len() == 0 - case pcommon.ValueTypeMap: - return att.Map().Len() == 0 - case pcommon.ValueTypeBytes: - return att.Bytes().Len() == 0 - } - - return false -} - -// sendNonOTLPLogs sends log records from the logBuffer formatted according -// to configured LogFormat and as the result of execution -// returns array of records which has not been sent correctly and error -func (s *sender) sendNonOTLPLogs(ctx context.Context, rl plog.ResourceLogs, flds fields) ([]plog.LogRecord, error) { - if s.config.LogFormat == OTLPLogFormat { - return nil, fmt.Errorf("attempting to send OTLP logs as non-OTLP data") - } - - var ( - body bodyBuilder = newBodyBuilder() - errs []error - droppedRecords []plog.LogRecord - currentRecords []plog.LogRecord - ) - - slgs := rl.ScopeLogs() - for i := 0; i < slgs.Len(); i++ { - slg := slgs.At(i) - for j := 0; j < slg.LogRecords().Len(); j++ { - lr := slg.LogRecords().At(j) - formattedLine, err := s.formatLogLine(lr) - if err != nil { - droppedRecords = append(droppedRecords, lr) - errs = append(errs, err) - continue - } - - sent, err := s.appendAndMaybeSend(ctx, []string{formattedLine}, LogsPipeline, &body, flds) - if err != nil { - errs = append(errs, err) - droppedRecords = append(droppedRecords, currentRecords...) - } - - // If data was sent and either failed or succeeded, cleanup the currentRecords slice - if sent { - currentRecords = currentRecords[:0] - } - - currentRecords = append(currentRecords, lr) - } - } - - if body.Len() > 0 { - if err := s.send(ctx, LogsPipeline, body.toCountingReader(), flds); err != nil { - errs = append(errs, err) - droppedRecords = append(droppedRecords, currentRecords...) - } - } - - return droppedRecords, errors.Join(errs...) 
-} - -func (s *sender) formatLogLine(lr plog.LogRecord) (string, error) { - var formattedLine string - var err error - - switch s.config.LogFormat { - case TextFormat: - formattedLine = s.logToText(lr) - case JSONFormat: - formattedLine, err = s.logToJSON(lr) - default: - err = errors.New("unexpected log format") - } - - return formattedLine, err -} - -// TODO: add support for HTTP limits -func (s *sender) sendOTLPLogs(ctx context.Context, ld plog.Logs) error { - body, err := logsMarshaler.MarshalLogs(ld) - if err != nil { - return err - } - - return s.send(ctx, LogsPipeline, newCountingReader(ld.LogRecordCount()).withBytes(body), fields{}) -} - -// sendNonOTLPMetrics sends metrics in right format basing on the s.config.MetricFormat -func (s *sender) sendNonOTLPMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, []error) { - if s.config.MetricFormat == OTLPMetricFormat { - return md, []error{fmt.Errorf("attempting to send OTLP metrics as non-OTLP data")} - } - - var ( - body bodyBuilder = newBodyBuilder() - errs []error - currentResources []pmetric.ResourceMetrics - flds fields - ) - - rms := md.ResourceMetrics() - droppedMetrics := pmetric.NewMetrics() - for i := 0; i < rms.Len(); i++ { - rm := rms.At(i) - flds = newFields(rm.Resource().Attributes()) - sms := rm.ScopeMetrics() - - // generally speaking, it's fine to send multiple ResourceMetrics in a single request - // the only exception is if the computed source headers are different, as those as unique per-request - // so we check if the headers are different here and send what we have if they are - if i > 0 { - currentSourceHeaders := getSourcesHeaders(flds) - previousFields := newFields(rms.At(i - 1).Resource().Attributes()) - previousSourceHeaders := getSourcesHeaders(previousFields) - if !reflect.DeepEqual(previousSourceHeaders, currentSourceHeaders) && body.Len() > 0 { - - if err := s.send(ctx, MetricsPipeline, body.toCountingReader(), previousFields); err != nil { - errs = append(errs, err) - for _, resource := range currentResources { - resource.CopyTo(droppedMetrics.ResourceMetrics().AppendEmpty()) - } - } - body.Reset() - currentResources = currentResources[:0] - } - } - - // transform the metrics into formatted lines ready to be sent - var formattedLines []string - var err error - for i := 0; i < sms.Len(); i++ { - sm := sms.At(i) - - for j := 0; j < sm.Metrics().Len(); j++ { - m := sm.Metrics().At(j) - - var formattedLine string - - switch s.config.MetricFormat { - case PrometheusFormat: - formattedLine = s.prometheusFormatter.metric2String(m, rm.Resource().Attributes()) - default: - return md, []error{fmt.Errorf("unexpected metric format: %s", s.config.MetricFormat)} - } - - formattedLines = append(formattedLines, formattedLine) - } - } - - sent, err := s.appendAndMaybeSend(ctx, formattedLines, MetricsPipeline, &body, flds) - if err != nil { - errs = append(errs, err) - if sent { - // failed at sending, add the resource to the dropped metrics - // move instead of copy here to avoid duplicating data in memory on failure - for _, resource := range currentResources { - resource.CopyTo(droppedMetrics.ResourceMetrics().AppendEmpty()) - } - } - } - - // If data was sent, cleanup the currentResources slice - if sent { - currentResources = currentResources[:0] - } - - currentResources = append(currentResources, rm) - - } - - if body.Len() > 0 { - if err := s.send(ctx, MetricsPipeline, body.toCountingReader(), flds); err != nil { - errs = append(errs, err) - for _, resource := range currentResources { - 
resource.CopyTo(droppedMetrics.ResourceMetrics().AppendEmpty()) - } - } - } - - return droppedMetrics, errs -} - -func (s *sender) sendOTLPMetrics(ctx context.Context, md pmetric.Metrics) error { - rms := md.ResourceMetrics() - if rms.Len() == 0 { - s.logger.Debug("there are no metrics to send, moving on") - return nil - } - if s.config.DecomposeOtlpHistograms { - md = DecomposeHistograms(md) - } - - body, err := metricsMarshaler.MarshalMetrics(md) - if err != nil { - return err - } - - return s.send(ctx, MetricsPipeline, newCountingReader(md.DataPointCount()).withBytes(body), fields{}) -} - -// appendAndMaybeSend appends line to the request body that will be sent and sends -// the accumulated data if the internal logBuffer has been filled (with config.MaxRequestBodySize bytes). -// It returns a boolean indicating if the data was sent and an error -func (s *sender) appendAndMaybeSend( - ctx context.Context, - lines []string, - pipeline PipelineType, - body *bodyBuilder, - flds fields, -) (sent bool, err error) { - - linesTotalLength := 0 - for _, line := range lines { - linesTotalLength += len(line) + 1 // count the newline as well - } - - if body.Len() > 0 && body.Len()+linesTotalLength >= s.config.MaxRequestBodySize { - sent = true - err = s.send(ctx, pipeline, body.toCountingReader(), flds) - body.Reset() - } - - if body.Len() > 0 { - // Do not add newline if the body is empty - body.addNewLine() - } - - body.addLines(lines) - - return sent, err -} - -// sendTraces sends traces in right format basing on the s.config.TraceFormat -func (s *sender) sendTraces(ctx context.Context, td ptrace.Traces) error { - if s.config.TraceFormat == OTLPTraceFormat { - return s.sendOTLPTraces(ctx, td) - } - return nil -} - -// sendOTLPTraces sends trace records in OTLP format -func (s *sender) sendOTLPTraces(ctx context.Context, td ptrace.Traces) error { - if td.ResourceSpans().Len() == 0 { - s.logger.Debug("there are no traces to send, moving on") - return nil - } - - capacity := td.SpanCount() - - body, err := tracesMarshaler.MarshalTraces(td) - if err != nil { - return err - } - if err := s.send(ctx, TracesPipeline, newCountingReader(capacity).withBytes(body), fields{}); err != nil { - return err - } - return nil -} - -func addSourcesHeaders(req *http.Request, flds fields) { - sourceHeaderValues := getSourcesHeaders(flds) - - for headerName, headerValue := range sourceHeaderValues { - req.Header.Add(headerName, headerValue) - } -} - -func getSourcesHeaders(flds fields) map[string]string { - sourceHeaderValues := map[string]string{} - if !flds.isInitialized() { - return sourceHeaderValues - } - - attrs := flds.orig - - if v, ok := attrs.Get(attributeKeySourceHost); ok { - sourceHeaderValues[headerHost] = v.AsString() - } - - if v, ok := attrs.Get(attributeKeySourceName); ok { - sourceHeaderValues[headerName] = v.AsString() - } - - if v, ok := attrs.Get(attributeKeySourceCategory); ok { - sourceHeaderValues[headerCategory] = v.AsString() - } - return sourceHeaderValues -} - -func addLogsHeaders(req *http.Request, lf LogFormatType, flds fields) { - switch lf { - case OTLPLogFormat: - req.Header.Add(headerContentType, contentTypeOTLP) - default: - req.Header.Add(headerContentType, contentTypeLogs) - } - - if fieldsStr := flds.string(); fieldsStr != "" { - req.Header.Add(headerFields, fieldsStr) - } -} - -func addMetricsHeaders(req *http.Request, mf MetricFormatType) error { - switch mf { - case PrometheusFormat: - req.Header.Add(headerContentType, contentTypePrometheus) - case OTLPMetricFormat: - 
req.Header.Add(headerContentType, contentTypeOTLP) - default: - return fmt.Errorf("unsupported metrics format: %s", mf) - } - return nil -} - -func addTracesHeaders(req *http.Request, tf TraceFormatType) error { - switch tf { - case OTLPTraceFormat: - req.Header.Add(headerContentType, contentTypeOTLP) - default: - return fmt.Errorf("unsupported traces format: %s", tf) - } - return nil -} - -func (s *sender) addRequestHeaders(req *http.Request, pipeline PipelineType, flds fields) error { - req.Header.Add(headerClient, s.config.Client) - addSourcesHeaders(req, flds) - - switch pipeline { - case LogsPipeline: - addLogsHeaders(req, s.config.LogFormat, flds) - case MetricsPipeline: - if err := addMetricsHeaders(req, s.config.MetricFormat); err != nil { - return err - } - case TracesPipeline: - if err := addTracesHeaders(req, s.config.TraceFormat); err != nil { - return err - } - default: - return fmt.Errorf("unexpected pipeline: %v", pipeline) - } - return nil -} - -func (s *sender) recordMetrics(duration time.Duration, count int64, req *http.Request, resp *http.Response, pipeline PipelineType) { - statusCode := 0 - - if resp != nil { - statusCode = resp.StatusCode - } - - id := s.id.String() - - if err := observability.RecordRequestsDuration(duration, statusCode, req.URL.String(), string(pipeline), id); err != nil { - s.logger.Debug("error recording metric for request duration", zap.Error(err)) - } - - if err := observability.RecordRequestsBytes(req.ContentLength, statusCode, req.URL.String(), string(pipeline), id); err != nil { - s.logger.Debug("error recording metric for sent bytes", zap.Error(err)) - } - - if err := observability.RecordRequestsRecords(count, statusCode, req.URL.String(), string(pipeline), id); err != nil { - s.logger.Debug("error recording metric for sent records", zap.Error(err)) - } - - if err := observability.RecordRequestsSent(statusCode, req.URL.String(), string(pipeline), id); err != nil { - s.logger.Debug("error recording metric for sent request", zap.Error(err)) - } -} - -func (s *sender) addStickySessionCookie(req *http.Request) { - currentCookieValue := s.stickySessionCookieFunc() - if currentCookieValue != "" { - cookie := &http.Cookie{ - Name: stickySessionKey, - Value: currentCookieValue, - } - req.AddCookie(cookie) - } -} - -func (s *sender) updateStickySessionCookie(resp *http.Response) { - cookies := resp.Cookies() - if len(cookies) > 0 { - for _, cookie := range cookies { - if cookie.Name == stickySessionKey { - if cookie.Value != s.stickySessionCookieFunc() { - s.setStickySessionCookieFunc(cookie.Value) - } - return - } - } - } -} diff --git a/pkg/exporter/sumologicexporter/sender_test.go b/pkg/exporter/sumologicexporter/sender_test.go deleted file mode 100644 index 5b7fd338e3..0000000000 --- a/pkg/exporter/sumologicexporter/sender_test.go +++ /dev/null @@ -1,1316 +0,0 @@ -// Copyright 2020, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
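(Editorial note for readers migrating to the upstream exporter: the sticky-session handling that closes sender.go above is a plain cookie round-trip. The sketch below shows that pattern in isolation; the cookie name and the `client` type are hypothetical stand-ins for the exporter's `stickySessionKey` constant and `sender` struct, whose real definitions are not part of this hunk.)

```go
package sticky

import "net/http"

// sessionCookieName is a hypothetical stand-in for the exporter's
// stickySessionKey constant (its real value is defined elsewhere).
const sessionCookieName = "example-session-key"

// client keeps the last session cookie value and replays it on every
// outgoing request, mirroring addStickySessionCookie above.
type client struct {
	sessionCookie string
}

func (c *client) attach(req *http.Request) {
	if c.sessionCookie != "" {
		req.AddCookie(&http.Cookie{Name: sessionCookieName, Value: c.sessionCookie})
	}
}

// update mirrors updateStickySessionCookie: when the backend rotates the
// session cookie, remember the new value for subsequent requests.
func (c *client) update(resp *http.Response) {
	for _, cookie := range resp.Cookies() {
		if cookie.Name == sessionCookieName {
			if cookie.Value != c.sessionCookie {
				c.sessionCookie = cookie.Value
			}
			return
		}
	}
}
```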
- -package sumologicexporter - -import ( - "bufio" - "bytes" - "compress/gzip" - "compress/zlib" - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "sync/atomic" - "testing" - - "github.com/klauspost/compress/zstd" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/config/configcompression" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/plog" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/pdata/ptrace" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type senderTest struct { - reqCounter *int32 - srv *httptest.Server - s *sender -} - -// prepareSenderTest prepares the sender test environment. -// Provided cfgOpts additionally configure the sender after the sensible defaults -// for tests have been applied. -// The enclosed httptest.Server is closed automatically using test.Cleanup. -func prepareSenderTest(t *testing.T, compression configcompression.Type, cb []func(w http.ResponseWriter, req *http.Request), cfgOpts ...func(*Config)) *senderTest { - var reqCounter int32 - // generate a test server so we can capture and inspect the request - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if len(cb) == 0 { - return - } - - if c := int(atomic.LoadInt32(&reqCounter)); assert.Greater(t, len(cb), c) { - cb[c](w, req) - atomic.AddInt32(&reqCounter, 1) - } - })) - t.Cleanup(func() { testServer.Close() }) - - cfg := createDefaultConfig().(*Config) - cfg.ClientConfig.Endpoint = testServer.URL - switch compression { - case configcompression.TypeGzip: - cfg.ClientConfig.Compression = configcompression.TypeGzip - case configcompression.TypeZstd: - cfg.ClientConfig.Compression = configcompression.TypeZstd - case NoCompression: - cfg.ClientConfig.Compression = NoCompression - case configcompression.TypeDeflate: - cfg.ClientConfig.Compression = configcompression.TypeDeflate - default: - cfg.CompressEncoding = configcompression.TypeGzip - } - cfg.ClientConfig.Auth = nil - httpSettings := cfg.ClientConfig - host := componenttest.NewNopHost() - client, err := httpSettings.ToClient(context.TODO(), host, component.TelemetrySettings{}) - require.NoError(t, err) - cfg.LogFormat = TextFormat - cfg.MetricFormat = OTLPMetricFormat - cfg.MaxRequestBodySize = 20_971_520 - for _, cfgOpt := range cfgOpts { - cfgOpt(cfg) - } - - pf, err := newPrometheusFormatter() - require.NoError(t, err) - - logger, err := zap.NewDevelopment() - require.NoError(t, err) - - return &senderTest{ - reqCounter: &reqCounter, - srv: testServer, - s: newSender( - logger, - cfg, - client, - pf, - testServer.URL, - testServer.URL, - testServer.URL, - func() string { return "" }, - func(string) {}, - component.ID{}, - ), - } -} - -func extractBody(t *testing.T, req *http.Request) string { - buf := new(strings.Builder) - _, err := io.Copy(buf, req.Body) - require.NoError(t, err) - return buf.String() -} - -func exampleLog() []plog.LogRecord { - buffer := make([]plog.LogRecord, 1) - buffer[0] = plog.NewLogRecord() - buffer[0].Body().SetStr("Example log") - - return buffer -} - -func exampleNLogs(n int) []plog.LogRecord { - buffer := make([]plog.LogRecord, n) - for i := 0; i < n; i++ { - buffer[i] = plog.NewLogRecord() -
buffer[i].Body().SetStr("Example log") - } - - return buffer -} - -func exampleTwoLogs() []plog.LogRecord { - buffer := make([]plog.LogRecord, 2) - buffer[0] = plog.NewLogRecord() - buffer[0].Body().SetStr("Example log") - buffer[0].Attributes().PutStr("key1", "value1") - buffer[0].Attributes().PutStr("key2", "value2") - buffer[1] = plog.NewLogRecord() - buffer[1].Body().SetStr("Another example log") - buffer[1].Attributes().PutStr("key1", "value1") - buffer[1].Attributes().PutStr("key2", "value2") - - return buffer -} - -func decodeGzip(t *testing.T, data io.Reader) string { - r, err := gzip.NewReader(data) - require.NoError(t, err) - - var buf []byte - buf, err = io.ReadAll(r) - require.NoError(t, err) - - return string(buf) -} - -func decodeZstd(t *testing.T, data io.Reader) string { - r, err := zstd.NewReader(data) - require.NoError(t, err) - var buf []byte - buf, err = io.ReadAll(r) - require.NoError(t, err) - - return string(buf) -} - -func decodeZlib(t *testing.T, data io.Reader) string { - r, err := zlib.NewReader(data) - require.NoError(t, err) - var buf []byte - buf, err = io.ReadAll(r) - require.NoError(t, err) - - return string(buf) -} - -func TestSendTrace(t *testing.T) { - tracesMarshaler = ptrace.ProtoMarshaler{} - td := exampleTrace() - traceBody, err := tracesMarshaler.MarshalTraces(td) - assert.NoError(t, err) - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, string(traceBody), body) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-protobuf", req.Header.Get("Content-Type")) - }, - }) - - err = test.s.sendTraces(context.Background(), td) - assert.NoError(t, err) -} - -func TestSendLogs(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log\nAnother example log", body) - assert.Equal(t, "key1=value, key2=value2", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords1 := slgs.AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsRecords2 := slgs.AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key1": "value", "key2": "value2"}), - ) - assert.NoError(t, err) - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsWithEmptyField(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log\nAnother example log", body) - assert.Equal(t, "key1=value, key2=value2", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords1 := slgs.AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsRecords2 := 
slgs.AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key1": "value", "key2": "value2", "service": ""}), - ) - assert.NoError(t, err) - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsMultitype(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `{"lk1":"lv1","lk2":13} -["lv2",13]` - assert.Equal(t, expected, body) - assert.Equal(t, "key1=value, key2=value2", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords := slgs.AppendEmpty().LogRecords() - attVal := pcommon.NewValueMap() - attMap := attVal.Map() - attMap.PutStr("lk1", "lv1") - attMap.PutInt("lk2", 13) - logRecord := logsRecords.AppendEmpty() - attVal.CopyTo(logRecord.Body()) - - attVal = pcommon.NewValueSlice() - attArr := attVal.Slice() - strVal := pcommon.NewValueStr("lv2") - intVal := pcommon.NewValueInt(13) - strVal.CopyTo(attArr.AppendEmpty()) - intVal.CopyTo(attArr.AppendEmpty()) - attVal.CopyTo(logsRecords.AppendEmpty().Body()) - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key1": "value", "key2": "value2"}), - ) - assert.NoError(t, err) - - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsSplit(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Example log", body) - }, - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Another example log", body) - }, - }) - test.s.config.MaxRequestBodySize = 10 - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords1 := slgs.AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsRecords2 := slgs.AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fields{}, - ) - assert.NoError(t, err) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsSplitFailedOne(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - _, err := fmt.Fprintf( - w, - `{"id":"1TIRY-KGIVX-TPQRJ","errors":[{"code":"internal.error","message":"Internal server error."}]}`, - ) - - require.NoError(t, err) - - body := extractBody(t, req) - assert.Equal(t, "Example log", body) - }, - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Equal(t, "Another example log", body) - }, - }) - test.s.config.MaxRequestBodySize = 10 - test.s.config.LogFormat = TextFormat - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords1 := slgs.AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsRecords2 := slgs.AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - dropped, err := 
test.s.sendNonOTLPLogs(context.Background(), - rls, - fields{}, - ) - assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error, id: 1TIRY-KGIVX-TPQRJ, errors: [{Code:internal.error Message:Internal server error.}]") - assert.Len(t, dropped, 1) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsSplitFailedAll(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - assert.Equal(t, "Example log", body) - }, - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(404) - - body := extractBody(t, req) - assert.Equal(t, "Another example log", body) - }, - }) - test.s.config.MaxRequestBodySize = 10 - test.s.config.LogFormat = TextFormat - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs() - logsRecords1 := slgs.AppendEmpty().LogRecords() - logsRecords1.AppendEmpty().Body().SetStr("Example log") - logsRecords2 := slgs.AppendEmpty().LogRecords() - logsRecords2.AppendEmpty().Body().SetStr("Another example log") - - dropped, err := test.s.sendNonOTLPLogs(context.Background(), rls, fields{}) - assert.EqualError( - t, - err, - "failed sending data: status: 500 Internal Server Error\nfailed sending data: status: 404 Not Found", - ) - assert.Len(t, dropped, 2) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsJsonConfig(t *testing.T) { - twoLogsFunc := func() plog.ResourceLogs { - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Another example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - return rls - } - - twoComplexBodyLogsFunc := func() plog.ResourceLogs { - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - body := pcommon.NewValueMap().Map() - body.PutStr("a", "b") - body.PutBool("c", false) - body.PutInt("d", 20) - body.PutDouble("e", 20.5) - - f := pcommon.NewValueSlice() - f.Slice().EnsureCapacity(4) - f.Slice().AppendEmpty().SetStr("p") - f.Slice().AppendEmpty().SetBool(true) - f.Slice().AppendEmpty().SetInt(13) - f.Slice().AppendEmpty().SetDouble(19.3) - f.Slice().CopyTo(body.PutEmptySlice("f")) - - g := pcommon.NewValueMap() - g.Map().PutStr("h", "i") - g.Map().PutBool("j", false) - g.Map().PutInt("k", 12) - g.Map().PutDouble("l", 11.1) - - g.Map().CopyTo(body.PutEmptyMap("g")) - - log.Attributes().PutStr("m", "n") - - pcommon.NewValueMap().CopyTo(log.Body()) - body.CopyTo(log.Body().Map()) - - return rls - } - - testcases := []struct { - name string - configOpts []func(*Config) - bodyRegex string - logsFunc func() plog.ResourceLogs - }{ - { - name: "default config", - bodyRegex: `{"key1":"value1","key2":"value2","log":"Example log"}` + - `\n` + - `{"key1":"value1","key2":"value2","log":"Another example log"}`, - logsFunc: twoLogsFunc, - }, - { - name: "empty body", - bodyRegex: `{"key1":"value1","key2":"value2"}`, - - logsFunc: func() plog.ResourceLogs { - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - return rls - }, - }, - { - name: 
"complex body", - bodyRegex: `{"log":{"a":"b","c":false,"d":20,"e":20.5,"f":\["p",true,13,19.3\],` + - `"g":{"h":"i","j":false,"k":12,"l":11.1}},"m":"n"}`, - logsFunc: twoComplexBodyLogsFunc, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - assert.Regexp(t, tc.bodyRegex, body) - }, - }, tc.configOpts...) - - test.s.config.LogFormat = JSONFormat - - _, err := test.s.sendNonOTLPLogs(context.Background(), - tc.logsFunc(), - fields{}, - ) - assert.NoError(t, err) - - assert.EqualValues(t, 1, *test.reqCounter) - }) - } -} - -func TestSendLogsJson(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Example log"}` - regex += `\n` - regex += `{"key1":"value1","key2":"value2","log":"Another example log"}` - assert.Regexp(t, regex, body) - - assert.Equal(t, "key=value", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - test.s.config.LogFormat = JSONFormat - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Another example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key": "value"}), - ) - assert.NoError(t, err) - - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsJsonHTLM(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Example log"}` - regex += `\n` - regex += `{"key1":"value1","key2":"value2","log":"

Another example log

"}` - assert.Regexp(t, regex, body) - - assert.Equal(t, "key=value", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - test.s.config.LogFormat = JSONFormat - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("

Another example log

") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key": "value"}), - ) - assert.NoError(t, err) - - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsJsonMultitype(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - var regex string - regex += `{"key1":"value1","key2":"value2","log":{"lk1":"lv1","lk2":13}}` - regex += `\n` - regex += `{"key1":"value1","key2":"value2","log":\["lv2",13\]}` - assert.Regexp(t, regex, body) - - assert.Equal(t, "key=value", req.Header.Get("X-Sumo-Fields")) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-www-form-urlencoded", req.Header.Get("Content-Type")) - }, - }) - test.s.config.LogFormat = JSONFormat - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - - attVal := pcommon.NewValueMap() - attMap := attVal.Map() - attMap.PutStr("lk1", "lv1") - attMap.PutInt("lk2", 13) - - log := slgs.LogRecords().AppendEmpty() - attVal.CopyTo(log.Body()) - - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - - attVal = pcommon.NewValueSlice() - attArr := attVal.Slice() - strVal := pcommon.NewValueStr("lv2") - intVal := pcommon.NewValueInt(13) - - strVal.CopyTo(attArr.AppendEmpty()) - intVal.CopyTo(attArr.AppendEmpty()) - - attVal.CopyTo(log.Body()) - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key": "value"}), - ) - assert.NoError(t, err) - - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestSendLogsJsonSplit(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Example log"}` - assert.Regexp(t, regex, body) - }, - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Another example log"}` - assert.Regexp(t, regex, body) - }, - }) - test.s.config.LogFormat = JSONFormat - test.s.config.MaxRequestBodySize = 10 - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Another example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key": "value"}), - ) - assert.NoError(t, err) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsJsonSplitFailedOne(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Example log"}` - assert.Regexp(t, regex, body) - }, - 
func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Another example log"}` - assert.Regexp(t, regex, body) - }, - }) - test.s.config.LogFormat = JSONFormat - test.s.config.MaxRequestBodySize = 10 - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Another example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - dropped, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap(map[string]string{"key": "value"}), - ) - assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error") - assert.Len(t, dropped, 1) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsJsonSplitFailedAll(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Example log"}` - assert.Regexp(t, regex, body) - }, - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(404) - - body := extractBody(t, req) - - var regex string - regex += `{"key1":"value1","key2":"value2","log":"Another example log"}` - assert.Regexp(t, regex, body) - }, - }) - test.s.config.LogFormat = JSONFormat - test.s.config.MaxRequestBodySize = 10 - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - - log.Body().SetStr("Example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - log = slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Another example log") - log.Attributes().PutStr("key1", "value1") - log.Attributes().PutStr("key2", "value2") - - dropped, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fields{}, - ) - - assert.EqualError( - t, - err, - "failed sending data: status: 500 Internal Server Error\nfailed sending data: status: 404 Not Found", - ) - assert.Len(t, dropped, 2) - - assert.EqualValues(t, 2, *test.reqCounter) -} - -func TestSendLogsUnexpectedFormat(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - }, - }) - test.s.config.LogFormat = "dummy" - - rls := plog.NewResourceLogs() - slgs := rls.ScopeLogs().AppendEmpty() - log := slgs.LogRecords().AppendEmpty() - log.Body().SetStr("Example log") - - dropped, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fields{}, - ) - assert.Error(t, err) - assert.Len(t, dropped, 1) - assert.Equal(t, []plog.LogRecord{log}, dropped) -} - -func TestSendLogsOTLP(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - //nolint:lll - assert.Equal(t, "\n\x84\x01\n\x00\x12;\n\x00\x127*\r\n\vExample log2\x10\n\x04key1\x12\b\n\x06value12\x10\n\x04key2\x12\b\n\x06value2J\x00R\x00\x12C\n\x00\x12?*\x15\n\x13Another example log2\x10\n\x04key1\x12\b\n\x06value12\x10\n\x04key2\x12\b\n\x06value2J\x00R\x00", body) - - assert.Equal(t, "otelcol", 
req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/x-protobuf", req.Header.Get("Content-Type")) - - assert.Empty(t, req.Header.Get("X-Sumo-Fields"), - "We should not get X-Sumo-Fields header when sending data with OTLP", - ) - assert.Empty(t, req.Header.Get("X-Sumo-Category"), - "We should not get X-Sumo-Category header when sending data with OTLP", - ) - assert.Empty(t, req.Header.Get("X-Sumo-Name"), - "We should not get X-Sumo-Name header when sending data with OTLP", - ) - assert.Empty(t, req.Header.Get("X-Sumo-Host"), - "We should not get X-Sumo-Host header when sending data with OTLP", - ) - }, - }) - - test.s.config.LogFormat = "otlp" - - l := plog.NewLogs() - ls := l.ResourceLogs().AppendEmpty() - - logRecords := exampleTwoLogs() - for i := 0; i < len(logRecords); i++ { - logRecords[i].MoveTo(ls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) - } - - l.MarkReadOnly() - - assert.NoError(t, test.s.sendOTLPLogs(context.Background(), l)) - assert.EqualValues(t, 1, *test.reqCounter) -} - -func TestLogsHandlesReceiverResponses(t *testing.T) { - t.Run("json with too many fields logs a warning", func(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, `{ - "status" : 200, - "id" : "YBLR1-S2T29-MVXEJ", - "code" : "bad.http.header.fields", - "message" : "X-Sumo-Fields Warning: 14 key-value pairs are dropped as they are exceeding maximum key-value pair number limit 30." - }`) - }, - }, func(c *Config) { - c.LogFormat = JSONFormat - }) - - rls := plog.NewResourceLogs() - rls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("Example log") - - var buffer bytes.Buffer - writer := bufio.NewWriter(&buffer) - test.s.logger = zap.New( - zapcore.NewCore( - zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()), - zapcore.AddSync(writer), - zapcore.DebugLevel, - ), - ) - - _, err := test.s.sendNonOTLPLogs(context.Background(), - rls, - fieldsFromMap( - map[string]string{ - "cluster": "abcaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - "code": "4222222222222222222222222222222222222222222222222222222222222222222222222222222222222", - "component": "apiserver", - "endpoint": "httpsaaaaaaaaaaaaaaaaaaa", - "a": "a", - "b": "b", - "c": "c", - "d": "d", - "e": "e", - "f": "f", - "g": "g", - "q": "q", - "w": "w", - "r": "r", - "t": "t", - "y": "y", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7", - "8": "8", - "9": "9", - "10": "10", - "11": "11", - "12": "12", - "13": "13", - "14": "14", - "15": "15", - "16": "16", - "17": "17", - "18": "18", - "19": "19", - "20": "20", - "21": "21", - "22": "22", - "23": "23", - "24": "24", - "25": "25", - "26": "26", - "27": "27", - "28": "28", - "29": "29", - "_sourceName": "test_source_name", - "_sourceHost": "test_source_host", - "_sourceCategory": "test_source_category", - }), - ) - assert.NoError(t, writer.Flush()) - assert.NoError(t, err) - assert.EqualValues(t, 1, *test.reqCounter) - - assert.Contains(t, - buffer.String(), - `There was an issue sending data {`+ - `"status": "200 OK", `+ - `"id": "YBLR1-S2T29-MVXEJ", `+ - `"code": "bad.http.header.fields", `+ - `"message": "X-Sumo-Fields Warning: 14 key-value pairs are dropped as they are exceeding maximum key-value pair number limit 30."`, - ) - }) -} - -func TestInvalidEndpoint(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req 
*http.Request){}) - - test.s.dataUrlLogs = ":" - - rls := plog.NewResourceLogs() - rls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("Example log") - - _, err := test.s.sendNonOTLPLogs(context.Background(), rls, fields{}) - assert.EqualError(t, err, `parse ":": missing protocol scheme`) -} - -func TestInvalidPostRequest(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){}) - - test.s.dataUrlLogs = "" - rls := plog.NewResourceLogs() - rls.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty().Body().SetStr("Example log") - - _, err := test.s.sendNonOTLPLogs(context.Background(), rls, fields{}) - assert.EqualError(t, err, `Post "": unsupported protocol scheme ""`) -} - -func TestInvalidMetricFormat(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){}) - - test.s.config.MetricFormat = "invalid" - - err := test.s.send(context.Background(), MetricsPipeline, newCountingReader(0).withString(""), fields{}) - assert.EqualError(t, err, `unsupported metrics format: invalid`) -} - -func TestInvalidPipeline(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){}) - - err := test.s.send(context.Background(), "invalidPipeline", newCountingReader(0).withString(""), fields{}) - assert.EqualError(t, err, `unknown pipeline type: invalidPipeline`) -} - -func TestSendCompressGzip(t *testing.T) { - test := prepareSenderTest(t, configcompression.TypeGzip, []func(res http.ResponseWriter, req *http.Request){ - func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(200) - if _, err := res.Write([]byte("")); err != nil { - res.WriteHeader(http.StatusInternalServerError) - assert.FailNow(t, "err: %v", err) - return - } - body := decodeGzip(t, req.Body) - assert.Equal(t, "gzip", req.Header.Get("Content-Encoding")) - assert.Equal(t, "Some example log", body) - }, - }) - - reader := newCountingReader(0).withString("Some example log") - - err := test.s.send(context.Background(), LogsPipeline, reader, fields{}) - require.NoError(t, err) -} - -func TestSendCompressGzipDeprecated(t *testing.T) { - test := prepareSenderTest(t, "default", []func(res http.ResponseWriter, req *http.Request){ - func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(200) - if _, err := res.Write([]byte("")); err != nil { - res.WriteHeader(http.StatusInternalServerError) - assert.FailNow(t, "err: %v", err) - return - } - body := decodeGzip(t, req.Body) - assert.Equal(t, "gzip", req.Header.Get("Content-Encoding")) - assert.Equal(t, "Some example log", body) - }, - }) - - reader := newCountingReader(0).withString("Some example log") - - err := test.s.send(context.Background(), LogsPipeline, reader, fields{}) - require.NoError(t, err) -} - -func TestSendCompressZstd(t *testing.T) { - test := prepareSenderTest(t, configcompression.TypeZstd, []func(res http.ResponseWriter, req *http.Request){ - func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(200) - if _, err := res.Write([]byte("")); err != nil { - res.WriteHeader(http.StatusInternalServerError) - assert.FailNow(t, "err: %v", err) - return - } - body := decodeZstd(t, req.Body) - assert.Equal(t, "zstd", req.Header.Get("Content-Encoding")) - assert.Equal(t, "Some example log", body) - }, - }) - - reader := newCountingReader(0).withString("Some example log") - - err := test.s.send(context.Background(), LogsPipeline, reader, fields{}) - require.NoError(t, err) -} - 
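(Editorial note: the compression tests above and below all share one shape: the handler asserts the Content-Encoding header, decompresses the request body, and compares it with the original payload. A minimal standard-library round-trip of the gzip case, independent of the exporter, might look like the sketch below.)

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"
)

func main() {
	// compress the payload the way an HTTP client would before sending
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("Some example log")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// decompress on the receiving side, as decodeGzip does in these tests
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	body, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body)) // prints: Some example log
}
```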
-func TestSendCompressDeflate(t *testing.T) { - test := prepareSenderTest(t, configcompression.TypeDeflate, []func(res http.ResponseWriter, req *http.Request){ - func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(200) - if _, err := res.Write([]byte("")); err != nil { - res.WriteHeader(http.StatusInternalServerError) - assert.FailNow(t, "err: %v", err) - return - } - body := decodeZlib(t, req.Body) - assert.Equal(t, "deflate", req.Header.Get("Content-Encoding")) - assert.Equal(t, "Some example log", body) - }, - }) - - reader := newCountingReader(0).withString("Some example log") - - err := test.s.send(context.Background(), LogsPipeline, reader, fields{}) - require.NoError(t, err) -} - -func TestSendMetrics(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `` + - `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156920",url="http://example_url"} 124 1608124661166` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }) - - test.s.config.MetricFormat = PrometheusFormat - - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - metrics := metricAndAttrsToPdataMetrics( - attrs, - metricSum, metricGauge, - ) - metrics.MarkReadOnly() - - _, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Empty(t, errs) -} - -func TestSendMetricsSplit(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `` + - `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156920",url="http://example_url"} 124 1608124661166` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - assert.Equal(t, "otelcol", req.Header.Get("X-Sumo-Client")) - assert.Equal(t, "application/vnd.sumologic.prometheus", req.Header.Get("Content-Type")) - }, - }) - - test.s.config.MetricFormat = PrometheusFormat - - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - metrics := metricAndAttrsToPdataMetrics( - attrs, - metricSum, metricGauge, - ) - metrics.MarkReadOnly() - - _, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Empty(t, errs) -} - -func TestSendOTLPHistogram(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - unmarshaler := pmetric.ProtoUnmarshaler{} - body, err := io.ReadAll(req.Body) - require.NoError(t, err) - metrics, err := unmarshaler.UnmarshalMetrics(body) - require.NoError(t, err) - assert.Equal(t, 3, metrics.MetricCount()) - assert.Equal(t, 16, metrics.DataPointCount()) - }, - }) - test.s.config.DecomposeOtlpHistograms = true - - metricHistogram, attrs := exampleHistogramMetric() - - 
metrics := pmetric.NewMetrics() - - rms := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rms.Resource().Attributes()) - metricHistogram.CopyTo(rms.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - metrics.MarkReadOnly() - - err := test.s.sendOTLPMetrics(context.Background(), metrics) - assert.NoError(t, err) -} - -func TestSendMetricsSplitBySource(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value",_sourceHost="value1"} 14500 1605534165000` - assert.Equal(t, expected, body) - }, - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `` + - `gauge_metric_name{test="test_value",test2="second_value",_sourceHost="value2",remote_name="156920",url="http://example_url"} 124 1608124661166` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",_sourceHost="value2",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - }, - }) - test.s.config.MetricFormat = PrometheusFormat - - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(2) - - rmsSum := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsSum.Resource().Attributes()) - rmsSum.Resource().Attributes().PutStr("_sourceHost", "value1") - metricSum.CopyTo(rmsSum.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - - rmsGauge := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsGauge.Resource().Attributes()) - rmsGauge.Resource().Attributes().PutStr("_sourceHost", "value2") - metricGauge.CopyTo(rmsGauge.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - metrics.MarkReadOnly() - - _, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Empty(t, errs) -} - -func TestSendMetricsSplitFailedOne(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000` - assert.Equal(t, expected, body) - }, - func(w http.ResponseWriter, req *http.Request) { - body := extractBody(t, req) - expected := `` + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156920",url="http://example_url"} 124 1608124661166` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - }, - }) - test.s.config.MaxRequestBodySize = 10 - test.s.config.MetricFormat = PrometheusFormat - - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(2) - - rmsSum := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsSum.Resource().Attributes()) - metricSum.CopyTo(rmsSum.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - - rmsGauge := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsGauge.Resource().Attributes()) - metricGauge.CopyTo(rmsGauge.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - metrics.MarkReadOnly() - - dropped, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Len(t, errs, 1) - 
assert.EqualError(t, errs[0], "failed sending data: status: 500 Internal Server Error") - require.Equal(t, 1, dropped.MetricCount()) - assert.Equal(t, metricSum, dropped.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0)) -} - -func TestSendMetricsSplitFailedAll(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(500) - - body := extractBody(t, req) - expected := `test.metric.data{test="test_value",test2="second_value"} 14500 1605534165000` - assert.Equal(t, expected, body) - }, - func(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(404) - - body := extractBody(t, req) - expected := `` + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156920",url="http://example_url"} 124 1608124661166` + "\n" + - `gauge_metric_name{test="test_value",test2="second_value",remote_name="156955",url="http://another_url"} 245 1608124662166` - assert.Equal(t, expected, body) - }, - }) - test.s.config.MaxRequestBodySize = 10 - test.s.config.MetricFormat = PrometheusFormat - - metricSum, attrs := exampleIntMetric() - metricGauge, _ := exampleIntGaugeMetric() - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(2) - - rmsSum := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsSum.Resource().Attributes()) - metricSum.CopyTo(rmsSum.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - - rmsGauge := metrics.ResourceMetrics().AppendEmpty() - attrs.CopyTo(rmsGauge.Resource().Attributes()) - metricGauge.CopyTo(rmsGauge.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - metrics.MarkReadOnly() - - dropped, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Len(t, errs, 2) - assert.EqualError( - t, - errs[0], - "failed sending data: status: 500 Internal Server Error", - ) - assert.EqualError( - t, - errs[1], - "failed sending data: status: 404 Not Found", - ) - require.Equal(t, 2, dropped.MetricCount()) - assert.Equal(t, metricSum, dropped.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0)) - assert.Equal(t, metricGauge, dropped.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0)) -} - -func TestSendMetricsUnexpectedFormat(t *testing.T) { - // Expect no requests - test := prepareSenderTest(t, NoCompression, nil) - test.s.config.MetricFormat = "invalid" - - metricSum, attrs := exampleIntMetric() - metrics := metricAndAttrsToPdataMetrics(attrs, metricSum) - metrics.MarkReadOnly() - - dropped, errs := test.s.sendNonOTLPMetrics(context.Background(), metrics) - assert.Len(t, errs, 1) - assert.EqualError(t, errs[0], "unexpected metric format: invalid") - require.Equal(t, 1, dropped.MetricCount()) - assert.Equal(t, metrics, dropped) -} - -func TestBadRequestCausesPermanentError(t *testing.T) { - test := prepareSenderTest(t, NoCompression, []func(w http.ResponseWriter, req *http.Request){ - func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(400) - }, - }) - test.s.config.MetricFormat = OTLPMetricFormat - - err := test.s.send(context.Background(), MetricsPipeline, newCountingReader(0).withString("malformed-request"), fields{}) - assert.True(t, consumererror.IsPermanent(err), "A '400 Bad Request' response from the server should result in a permanent error") -} diff --git a/pkg/exporter/sumologicexporter/test_data_test.go b/pkg/exporter/sumologicexporter/test_data_test.go deleted file mode 100644 index 64712185e6..0000000000 --- 
a/pkg/exporter/sumologicexporter/test_data_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2020 OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sumologicexporter - -import ( - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" - "go.opentelemetry.io/collector/pdata/ptrace" -) - -func exampleIntMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleIntMetric(true) -} - -func buildExampleIntMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - metric := pmetric.NewMetric() - metric.SetName("test.metric.data") - metric.SetUnit("bytes") - metric.SetEmptySum() - - if fillData { - dp := metric.Sum().DataPoints().AppendEmpty() - dp.SetTimestamp(1605534165 * 1e9) - dp.SetIntValue(14500) - } - - attributes := pcommon.NewMap() - attributes.PutStr("test", "test_value") - attributes.PutStr("test2", "second_value") - - return metric, attributes -} - -func exampleIntGaugeMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleIntGaugeMetric(true) -} - -func buildExampleIntGaugeMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptyGauge() - metric.SetName("gauge_metric_name") - - attributes.PutStr("foo", "bar") - - if fillData { - dp := metric.Gauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("remote_name", "156920") - dp.Attributes().PutStr("url", "http://example_url") - dp.SetIntValue(124) - dp.SetTimestamp(1608124661.166 * 1e9) - - dp = metric.Gauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("remote_name", "156955") - dp.Attributes().PutStr("url", "http://another_url") - dp.SetIntValue(245) - dp.SetTimestamp(1608124662.166 * 1e9) - } - - return metric, attributes -} - -func exampleDoubleGaugeMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleDoubleGaugeMetric(true) -} - -func buildExampleDoubleGaugeMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptyGauge() - metric.SetName("gauge_metric_name_double_test") - - attributes.PutStr("foo", "bar") - - if fillData { - dp := metric.Gauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("local_name", "156720") - dp.Attributes().PutStr("endpoint", "http://example_url") - dp.SetDoubleValue(33.4) - dp.SetTimestamp(1608124661.169 * 1e9) - - dp = metric.Gauge().DataPoints().AppendEmpty() - dp.Attributes().PutStr("local_name", "156155") - dp.Attributes().PutStr("endpoint", "http://another_url") - dp.SetDoubleValue(56.8) - dp.SetTimestamp(1608124662.186 * 1e9) - } - - return metric, attributes -} - -func exampleIntSumMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleIntSumMetric(true) -} - -func buildExampleIntSumMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptySum() - metric.SetName("sum_metric_int_test") - - attributes.PutStr("foo", "bar") - - if fillData { - 
dp := metric.Sum().DataPoints().AppendEmpty() - dp.Attributes().PutStr("name", "156720") - dp.Attributes().PutStr("address", "http://example_url") - dp.SetIntValue(45) - dp.SetTimestamp(1608124444.169 * 1e9) - - dp = metric.Sum().DataPoints().AppendEmpty() - dp.Attributes().PutStr("name", "156155") - dp.Attributes().PutStr("address", "http://another_url") - dp.SetIntValue(1238) - dp.SetTimestamp(1608124699.186 * 1e9) - } - - return metric, attributes -} - -func exampleDoubleSumMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleDoubleSumMetric(true) -} - -func buildExampleDoubleSumMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptySum() - metric.SetName("sum_metric_double_test") - - attributes.PutStr("foo", "bar") - - if fillData { - dp := metric.Sum().DataPoints().AppendEmpty() - dp.Attributes().PutStr("pod_name", "lorem") - dp.Attributes().PutStr("namespace", "default") - dp.SetDoubleValue(45.6) - dp.SetTimestamp(1618124444.169 * 1e9) - - dp = metric.Sum().DataPoints().AppendEmpty() - dp.Attributes().PutStr("pod_name", "opsum") - dp.Attributes().PutStr("namespace", "kube-config") - dp.SetDoubleValue(1238.1) - dp.SetTimestamp(1608424699.186 * 1e9) - } - - return metric, attributes -} - -func exampleSummaryMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleSummaryMetric(true) -} - -func buildExampleSummaryMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptySummary() - metric.SetName("summary_metric_double_test") - - attributes.PutStr("foo", "bar") - - if fillData { - dp := metric.Summary().DataPoints().AppendEmpty() - dp.Attributes().PutStr("pod_name", "dolor") - dp.Attributes().PutStr("namespace", "sumologic") - dp.SetSum(45.6) - dp.SetCount(3) - dp.SetTimestamp(1618124444.169 * 1e9) - - quantile := dp.QuantileValues().AppendEmpty() - quantile.SetQuantile(0.6) - quantile.SetValue(0.7) - - quantile = dp.QuantileValues().AppendEmpty() - quantile.SetQuantile(2.6) - quantile.SetValue(4) - - dp = metric.Summary().DataPoints().AppendEmpty() - dp.Attributes().PutStr("pod_name", "sit") - dp.Attributes().PutStr("namespace", "main") - dp.SetSum(1238.1) - dp.SetCount(7) - dp.SetTimestamp(1608424699.186 * 1e9) - } - - return metric, attributes -} - -func exampleHistogramMetric() (pmetric.Metric, pcommon.Map) { - return buildExampleHistogramMetric(true) -} - -func buildExampleHistogramMetric(fillData bool) (pmetric.Metric, pcommon.Map) { - attributes := pcommon.NewMap() - metric := pmetric.NewMetric() - - metric.SetEmptyHistogram() - metric.SetName("histogram_metric_double_test") - - attributes.PutStr("bar", "foo") - - if fillData { - dp := metric.Histogram().DataPoints().AppendEmpty() - dp.Attributes().PutStr("container", "dolor") - dp.Attributes().PutStr("branch", "sumologic") - si := pcommon.NewUInt64Slice() - si.FromRaw([]uint64{0, 12, 7, 5, 8, 13}) - si.CopyTo(dp.BucketCounts()) - - sf := pcommon.NewFloat64Slice() - sf.FromRaw([]float64{0.1, 0.2, 0.5, 0.8, 1}) - sf.CopyTo(dp.ExplicitBounds()) - - dp.SetTimestamp(1618124444.169 * 1e9) - dp.SetSum(45.6) - dp.SetCount(7) - - dp = metric.Histogram().DataPoints().AppendEmpty() - dp.Attributes().PutStr("container", "sit") - dp.Attributes().PutStr("branch", "main") - - si = pcommon.NewUInt64Slice() - si.FromRaw([]uint64{0, 10, 1, 1, 4, 6}) - si.CopyTo(dp.BucketCounts()) - - sf = pcommon.NewFloat64Slice() - sf.FromRaw([]float64{0.1, 0.2, 0.5, 0.8, 1}) - 
sf.CopyTo(dp.ExplicitBounds()) - - dp.SetTimestamp(1608424699.186 * 1e9) - dp.SetSum(54.1) - dp.SetCount(98) - } else { - dp := metric.Histogram().DataPoints().AppendEmpty() - dp.SetCount(0) - } - - return metric, attributes -} - -func metricPairToMetrics(mp ...metricPair) pmetric.Metrics { - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(len(mp)) - for _, record := range mp { - rms := metrics.ResourceMetrics().AppendEmpty() - record.attributes.CopyTo(rms.Resource().Attributes()) - // TODO: Change metricPair to have an init metric func. - record.metric.CopyTo(rms.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - } - - metrics.MarkReadOnly() - return metrics -} - -func metricAndAttrsToPdataMetrics(attributes pcommon.Map, ms ...pmetric.Metric) pmetric.Metrics { - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(len(ms)) - - rms := metrics.ResourceMetrics().AppendEmpty() - attributes.CopyTo(rms.Resource().Attributes()) - - metricsSlice := rms.ScopeMetrics().AppendEmpty().Metrics() - - for _, record := range ms { - record.CopyTo(metricsSlice.AppendEmpty()) - } - - return metrics -} - -func metricAndAttributesToPdataMetrics(metric pmetric.Metric, attributes pcommon.Map) pmetric.Metrics { - metrics := pmetric.NewMetrics() - metrics.ResourceMetrics().EnsureCapacity(attributes.Len()) - rms := metrics.ResourceMetrics().AppendEmpty() - attributes.CopyTo(rms.Resource().Attributes()) - metric.CopyTo(rms.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) - - return metrics -} - -func fieldsFromMap(s map[string]string) fields { - attrMap := pcommon.NewMap() - for k, v := range s { - attrMap.PutStr(k, v) - } - return newFields(attrMap) -} - -func exampleTrace() ptrace.Traces { - td := ptrace.NewTraces() - rs := td.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().PutStr("hostname", "testHost") - rs.Resource().Attributes().PutStr("_sourceHost", "source_host") - rs.Resource().Attributes().PutStr("_sourceName", "source_name") - rs.Resource().Attributes().PutStr("_sourceCategory", "source_category") - span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.SetTraceID(pcommon.TraceID([16]byte{0x5B, 0x8E, 0xFF, 0xF7, 0x98, 0x3, 0x81, 0x3, 0xD2, 0x69, 0xB6, 0x33, 0x81, 0x3F, 0xC6, 0xC})) - span.SetSpanID(pcommon.SpanID([8]byte{0xEE, 0xE1, 0x9B, 0x7E, 0xC3, 0xC1, 0xB1, 0x73})) - span.SetName("testSpan") - span.SetStartTimestamp(1544712660000000000) - span.SetEndTimestamp(1544712661000000000) - span.Attributes().PutInt("attr1", 55) - td.MarkReadOnly() - return td -}
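(Editorial note: the builders in the deleted test_data_test.go all follow the same pdata pattern: create an empty top-level object, append the resource/scope/record levels, then fill in attributes and data points. A condensed, self-contained sketch of that pattern for a single gauge metric, assuming only the public pdata API, follows; the names and values echo exampleIntGaugeMetric above.)

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	metrics := pmetric.NewMetrics()

	// resource-level attributes, which the deleted builders returned separately
	rm := metrics.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().PutStr("foo", "bar")

	// a gauge with a single int data point, shaped like exampleIntGaugeMetric
	m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("gauge_metric_name")
	m.SetEmptyGauge()

	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.Attributes().PutStr("remote_name", "156920")
	dp.SetIntValue(124)
	dp.SetTimestamp(pcommon.Timestamp(1608124661.166 * 1e9))

	fmt.Println(metrics.MetricCount(), metrics.DataPointCount()) // 1 1
}
```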