diff --git a/.gitlab/benchmarks.yml b/.gitlab/benchmarks.yml
index f252295217f..41753f938c9 100644
--- a/.gitlab/benchmarks.yml
+++ b/.gitlab/benchmarks.yml
@@ -153,7 +153,7 @@ benchmarks:
script:
- export ARTIFACTS_DIR="$(pwd)/artifacts" && (mkdir "${ARTIFACTS_DIR}" || :)
- git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/"
- - git clone --branch dd-trace-rb https://github.com/DataDog/benchmarking-platform platform && cd platform
+ - git clone --branch dd-trace-rb https://github.com/DataDog/benchmarking-platform benchmarking-platform && cd benchmarking-platform
- ./steps/capture-hardware-software-info.sh
- ./steps/run-benchmarks.sh
- ./steps/analyze-results.sh
diff --git a/Rakefile b/Rakefile
index ee43950a83a..38034f2cfe3 100644
--- a/Rakefile
+++ b/Rakefile
@@ -67,7 +67,7 @@ TEST_METADATA = {
'elasticsearch-8' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
},
'ethon' => {
- 'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
+ 'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ❌ jruby'
},
'excon' => {
'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
@@ -101,7 +101,7 @@ TEST_METADATA = {
'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
},
'httprb' => {
- 'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
+ 'http' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ❌ jruby'
},
'kafka' => {
'activesupport' => '✅ 2.5 / ✅ 2.6 / ✅ 2.7 / ✅ 3.0 / ✅ 3.1 / ✅ 3.2 / ✅ 3.3 / ✅ 3.4 / ✅ jruby'
diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md
index 3e3f2f9177f..aab0189e297 100644
--- a/docs/GettingStarted.md
+++ b/docs/GettingStarted.md
@@ -309,10 +309,16 @@ You can enable it through `Datadog.configure`:
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :action_cable
+ c.tracing.instrument :action_cable, **options
end
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ACTION_CABLE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### Action Mailer
The Action Mailer integration provides tracing for Rails 5 ActionMailer actions.
@@ -329,9 +335,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ------------ | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `email_data` | `Bool` | Whether or not to append additional email payload metadata to `action_mailer.deliver` spans. Fields include `['subject', 'to', 'from', 'bcc', 'cc', 'date', 'perform_deliveries']`. | `false` |
+| Key | Env Var | Type | Description | Default |
+| ------------ | - | ----- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ACTION_MAILER_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `email_data` | | `Bool` | Whether or not to append additional email payload metadata to `action_mailer.deliver` spans. Fields include `['subject', 'to', 'from', 'bcc', 'cc', 'date', 'perform_deliveries']`. | `false` |
### Action Pack
@@ -342,10 +349,16 @@ require 'actionpack'
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :action_pack
+ c.tracing.instrument :action_pack, **options
end
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ACTION_PACK_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### Action View
Most of the time, Action View is set up as part of Rails, but it can be activated separately:
@@ -361,9 +374,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------- |
-| `template_base_path` | `String` | Used when the template name is parsed. If you don't store your templates in the `views/` folder, you may need to change this value | `'views/'` |
+| Key | Env Var | Type | Description | Default |
+| -------------------- | - | ------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------- |
+| `enabled` | `DD_TRACE_ACTION_VIEW_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `template_base_path` | | `String` | Used when the template name is parsed. If you don't store your templates in the `views/` folder, you may need to change this value | `'views/'` |
### Active Job
@@ -374,12 +388,18 @@ require 'active_job'
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :active_job
+ c.tracing.instrument :active_job, **options
end
ExampleJob.perform_later
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ACTIVE_JOB_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### Active Model Serializers
The Active Model Serializers integration traces the `serialize` event for version 0.9+ and the `render` event for version 0.10+.
@@ -389,13 +409,19 @@ require 'active_model_serializers'
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :active_model_serializers
+ c.tracing.instrument :active_model_serializers, **options
end
my_object = MyModel.new(name: 'my object')
ActiveModelSerializers::SerializableResource.new(test_obj).serializable_hash
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ACTIVE_MODEL_SERIALIZERS_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### Active Record
Most of the time, Active Record is set up as part of a web framework (Rails, Sinatra...) however, it can be set up alone:
@@ -419,9 +445,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
-| `service_name` | `String` | Override the service name for the SQL query instrumentation. ActiveRecord instantiation instrumentation always uses the application's configured service name. | Name of database adapter (e.g. `'mysql2'`) |
+| Key | Env Var | Type | Description | Default |
+| -------------- | - | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| `enabled` | `DD_TRACE_ACTIVE_RECORD_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `service_name` | | `String` | Override the service name for the SQL query instrumentation. ActiveRecord instantiation instrumentation always uses the application's configured service name. | Name of database adapter (e.g. `'mysql2'`) |
**Configuring trace settings per database**
@@ -507,9 +534,10 @@ cache.read('city')
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| --------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
-| `cache_service` | `String` | Name of application running the `active_support` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `active_support-cache` |
+| Key | Env Var | Type | Description | Default |
+| --------------- | - | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
+| `enabled` | `DD_TRACE_ACTIVE_SUPPORT_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `cache_service` | | `String` | Name of application running the `active_support` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `active_support-cache` |
### AWS
@@ -531,6 +559,7 @@ Aws::S3::Client.new.list_buckets
| Key | Env Var | Type | Description | Default |
| -------------- | --------------------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_AWS_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_AWS_SERVICE_NAME` | `String` | Name of application running the `aws` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `aws` |
| `peer_service` | `DD_TRACE_AWS_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
@@ -544,7 +573,7 @@ To activate your integration, use the `Datadog.configure` method:
# Inside Rails initializer or equivalent
Datadog.configure do |c|
# Patches ::Concurrent::Future to use ExecutorService that propagates context
- c.tracing.instrument :concurrent_ruby
+ c.tracing.instrument :concurrent_ruby, **options
end
# Pass context into code executed within Concurrent::Future
@@ -566,6 +595,12 @@ Datadog::Tracing.trace('outer') do
end
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_CONCURRENT_RUBY_ENABLED` | `Bool` | Whether the integration propagates contexts. | `true` |
+
### Dalli
Dalli integration will trace all calls to your `memcached` server:
@@ -588,6 +623,7 @@ client.set('abc', 123)
| Key | Env Var | Type | Description | Default |
| ----------------- | ------------------------------------ | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| `enabled` | `DD_TRACE_DALLI_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `command_enabled` | `DD_TRACE_MEMCACHED_COMMAND_ENABLED` | `Bool` | Collect commands as the `memcached.command` tag. Command `keys` can potentially contain sensitive information. | `false` |
| `service_name` | `DD_TRACE_DALLI_SERVICE_NAME` | `String` | Name of application running the `dalli` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `memcached` |
| `peer_service` | `DD_TRACE_DALLI_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
@@ -608,9 +644,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ---------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `on_error` | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
+| Key | Env Var | Type | Description | Default |
+| ---------- | - | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_DELAYED_JOB_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
### Elasticsearch
@@ -636,6 +673,7 @@ Datadog.configure_onto(client.transport, **options)
| Key | Env Var | Type | Description | Default |
| -------------- | ------------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `enabled` | `DD_TRACE_ELASTICSEARCH_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_ELASTICSEARCH_SERVICE_NAME` | `String` | Name of application running the `elasticsearch` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `elasticsearch` |
| `peer_service` | `DD_TRACE_ELASTICSEARCH_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `quantize` | | `Hash` | Hash containing options for quantization. May include `:show` with an Array of keys to not quantize (or `:all` to skip quantization), or `:exclude` with Array of keys to exclude entirely. | `{}` |
@@ -662,6 +700,7 @@ end
| Key | Env Var | Type | Description | Default |
| --------------------- | ----------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_ETHON_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_ETHON_SERVICE_NAME` | `String` | Name of application running the `ethon` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `ethon` |
| `peer_service` | `DD_TRACE_ETHON_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -694,6 +733,7 @@ connection.get
| Key | Env Var | Type | Description | Default |
| --------------------- | ----------------------------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_EXCON_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_EXCON_SERVICE_NAME` | `String` | Name of application running the `excon` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `excon` |
| `peer_service` | `DD_TRACE_EXCON_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -758,6 +798,7 @@ connection.get('/foo')
| Key | Env Var | Type | Description | Default |
| --------------------- | ------------------------------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_FARADAY_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_FARADAY_SERVICE_NAME` | `String` | Name of application running the `faraday` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `faraday` |
| `peer_service` | `DD_TRACE_FARADAY_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -793,7 +834,7 @@ end
| Key | Env Var | Type | Description | Default |
| -------------------- | ----------------------------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
-| `enabled` | `DD_TRACE_GRAPE_ENABLED` | `Bool` | Defines whether Grape should be traced. Useful for temporarily disabling tracing. `true` or `false` | `true` |
+| `enabled` | `DD_TRACE_GRAPE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `error_status_codes` | `DD_TRACE_GRAPE_ERROR_STATUS_CODES` | `Array`\|`Range` | Defines HTTP status codes that are traced as errors. Value can be a range (`400...600`), or an array of ranges/integers `[403, 500...600]`. If configured with environment variable, use dash for range (`'400-599'`) and comma for adding element into an array (`'403,500-599'`) | `500...600` |
### GraphQL
@@ -814,12 +855,13 @@ YourSchema.execute(query, variables: {}, context: {}, operation_name: nil)
The `instrument :graphql` method accepts the following parameters. Additional options can be substituted in for `options`:
-| Key | Type | Description | Default |
-| ------------------------ | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
-| `schemas` | `Array` | Array of `GraphQL::Schema` objects (that support class-based schema only) to trace. If you do not provide any, then tracing will applied to all the schemas. | `[]` |
-| `with_unified_tracer` | `Bool` | Enable to instrument with `UnifiedTrace` tracer, enabling support for API Catalog. `with_deprecated_tracer` has priority over this. Default is `false`, using `GraphQL::Tracing::DataDogTrace` (Added in v2.2) | `false` |
-| `with_deprecated_tracer` | `Bool` | Enable to instrument with deprecated `GraphQL::Tracing::DataDogTracing`. This has priority over `with_unified_tracer`. Default is `false`, using `GraphQL::Tracing::DataDogTrace` | `false` |
-| `service_name` | `String` | Service name used for graphql instrumentation | `'ruby-graphql'` |
+| Key | Env Var | Type | Description | Default |
+| ------------------------ | - | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------- |
+| `enabled` | `DD_TRACE_GRAPHQL_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `schemas` | | `Array` | Array of `GraphQL::Schema` objects (that support class-based schema only) to trace. If you do not provide any, then tracing will be applied to all the schemas. | `[]` |
+| `with_unified_tracer` | | `Bool` | Enable to instrument with `UnifiedTrace` tracer, enabling support for API Catalog. `with_deprecated_tracer` has priority over this. Default is `false`, using `GraphQL::Tracing::DataDogTrace` (Added in v2.2) | `false` |
+| `with_deprecated_tracer` | | `Bool` | Enable to instrument with deprecated `GraphQL::Tracing::DataDogTracing`. Default is `false`, using `GraphQL::Tracing::DataDogTrace` | `false` |
+| `service_name` | | `String` | Service name used for graphql instrumentation | `'ruby-graphql'` |
**Manually configuring GraphQL schemas**
@@ -899,6 +941,7 @@ client.my_endpoint(DemoMessage.new(contents: 'hello!'))
| Key | Env Var | Type | Description | Default |
| --------------------- | ---------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ |
+| `enabled` | `DD_TRACE_GRPC_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_GRPC_SERVICE_NAME` | `String` | Name of application running the `grpc` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `grpc` |
| `peer_service` | `DD_TRACE_GRPC_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -941,9 +984,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------- | -------- | ------------------------------------------ | ------- |
-| `service_name` | `String` | Service name for `hanami` instrumentation. | `nil` |
+| Key | Env Var | Type | Description | Default |
+| -------------- | - | ------- | ------------------------------------------ | ------- |
+| `enabled` | `DD_TRACE_HANAMI_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `service_name` | | `String` | Service name for `hanami` instrumentation. | `nil` |
### http.rb
@@ -966,6 +1010,7 @@ end
| Key | Env Var | Type | Description | Default |
| --------------------- | ------------------------------------ | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| `enabled` | `DD_TRACE_HTTPRB_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_HTTPRB_SERVICE_NAME` | `String` | Name of application running the `httprb` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `httprb` |
| `peer_service` | `DD_TRACE_HTTPRB_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -993,6 +1038,7 @@ end
| Key | Env Var | Type | Description | Default |
| --------------------- | ---------------------------------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
+| `enabled` | `DD_TRACE_HTTPCLIENT_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_HTTPCLIENT_SERVICE_NAME` | `String` | Name of application running the `httpclient` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `httpclient` |
| `peer_service` | `DD_TRACE_HTTPCLIENT_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -1030,10 +1076,16 @@ require 'kafka'
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :kafka
+ c.tracing.instrument :kafka, **options
end
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_KAFKA_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### MongoDB
The integration traces any `Command` that is sent from the [MongoDB Ruby Driver](https://github.com/mongodb/mongo-ruby-driver) to a MongoDB cluster. By extension, Object Document Mappers (ODM) such as Mongoid are automatically instrumented if they use the official Ruby driver. To activate the integration, simply:
@@ -1059,6 +1111,7 @@ Datadog.configure_onto(client, **options)
| Key | Env Var | Type | Description | Default |
| -------------- | ----------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
+| `enabled` | `DD_TRACE_MONGO_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_MONGO_SERVICE_NAME` | `String` | Name of application running the `mongo` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `mongodb` |
| `peer_service` | `DD_TRACE_MONGO_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `quantize` | | `Hash` | Hash containing options for quantization. May include `:show` with an Array of keys to not quantize (or `:all` to skip quantization), or `:exclude` with Array of keys to exclude entirely. | `{ show: [:collection, :database, :operation] }` |
@@ -1114,6 +1167,7 @@ client.query("SELECT * FROM users WHERE group='x'")
| Key | Env Var | Type | Description | Default |
| --------------------- | ------------------------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_MYSQL2_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_MYSQL2_SERVICE_NAME` | `String` | Name of application running the `mysql2` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `mysql2` |
| `peer_service` | `DD_TRACE_MYSQL2_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `comment_propagation` | `DD_DBM_PROPAGATION_MODE` | `String` | SQL comment propagation mode for database monitoring. <br />(example: `disabled` \| `service`\| `full`). <br />**Important**: _Note that enabling SQL comment propagation results in potentially confidential data (service names) being stored in the databases which can then be accessed by other third parties that have been granted access to the database._ | `'disabled'` |
@@ -1149,6 +1203,7 @@ content = Net::HTTP.get(URI('http://127.0.0.1/index.html'))
| Key | Env Var | Type | Description | Default |
| --------------------- | ---------------------------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| `enabled` | `DD_TRACE_HTTP_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_NET_HTTP_SERVICE_NAME` | `String` | Name of application running the `net/http` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `net/http` |
| `peer_service` | `DD_TRACE_NET_HTTP_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -1187,6 +1242,7 @@ client.cluster.health
| Key | Env Var | Type | Description | Default |
| -------------- | ---------------------------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
+| `enabled` | `DD_TRACE_OPENSEARCH_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_OPENSEARCH_SERVICE_NAME` | `String` | Name of application running the `opensearch` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `opensearch` |
| `peer_service` | `DD_TRACE_OPENSEARCH_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `quantize` | | `Hash` | Hash containing options for quantization. May include `:show` with an Array of keys to not quantize (or `:all` to skip quantization), or `:exclude` with Array of keys to exclude entirely. | `{}` |
@@ -1212,7 +1268,7 @@ end
| Key | Env Var | Type | Description | Default |
| --------------------- | -------------------------- | ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `enabled` | | Defines whether Postgres should be traced. | `true` |
+| `enabled` | `DD_TRACE_PG_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_PG_SERVICE_NAME` | `String` | Name of application running the `pg` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `pg` |
| `peer_service` | `DD_TRACE_PG_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `comment_propagation` | `DD_DBM_PROPAGATION_MODE` | `String` | SQL comment propagation mode for database monitoring. <br />(example: `disabled` \| `service`\| `full`). <br />**Important**: _Note that enabling sql comment propagation results in potentially confidential data (service names) being stored in the databases which can then be accessed by other 3rd parties that have been granted access to the database._ | `'disabled'` |
@@ -1247,6 +1303,7 @@ client.run("select * from system.nodes")
| Key | Env Var | Type | Description | Default |
| -------------- | ------------------------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- |
+| `enabled` | `DD_TRACE_PRESTO_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_PRESTO_SERVICE_NAME` | `String` | Name of application running the `presto` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `presto` |
| `peer_service` | `DD_TRACE_PRESTO_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
@@ -1268,7 +1325,7 @@ end
| Key | Env Var | Type | Description | Default |
| ---------- | ------------------------------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ |
-| `enabled` | `DD_TRACE_QUE_ENABLED` | `Bool` | Defines whether Que should be traced. Useful for temporarily disabling tracing. `true` or `false` | `true` |
+| `enabled` | `DD_TRACE_QUE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `tag_args` | `DD_TRACE_QUE_TAG_ARGS_ENABLED` | `Bool` | Enable tagging of a job's args field. `true` for on, `false` for off. | `false` |
| `tag_data` | `DD_TRACE_QUE_TAG_DATA_ENABLED` | `Bool` | Enable tagging of a job's data field. `true` for on, `false` for off. | `false` |
| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error \| span.set_error(error) unless span.nil? }` |
@@ -1291,6 +1348,7 @@ end
| Key | Env Var | Type | Description | Default |
| -------------- | ------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
+| `enabled` | `DD_TRACE_RACECAR_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_RACECAR_SERVICE_NAME` | `String` | Name of application running the `racecar` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `racecar` |
### Rack
@@ -1318,23 +1376,24 @@ run app
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ |
-| `application` | ??? | Your Rack application. Required for `middleware_names`. | `nil` |
-| `distributed_tracing` | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
-| `headers` | `Hash` | Hash of HTTP request or response headers to add as tags to the `rack.request`. Accepts `request` and `response` keys with Array values e.g. `['Last-Modified']`. Adds `http.request.headers.*` and `http.response.headers.*` tags respectively. This option overrides the global `DD_TRACE_HEADER_TAGS`, see [Applying header tags to root spans][header tags] for more information. | `{ response: ['Content-Type', 'X-Request-ID'] }` |
-| `middleware_names` | `Bool` | Enable this if you want to use the last executed middleware class as the resource name for the `rack` span. If enabled alongside the `rails` instrumention, `rails` takes precedence by setting the `rack` resource name to the active `rails` controller when applicable. Requires `application` option to use. | `false` |
-| `quantize` | `Hash` | Hash containing options for quantization. May include `:query` or `:fragment`. | `{}` |
-| `quantize.base` | | Defines behavior for URL base (scheme, host, port). May be `:show` to keep URL base in `http.url` tag and not set `http.base_url` tag, or `nil` to remove URL base from `http.url` tag by default, leaving a path and setting `http.base_url`. Option must be nested inside the `quantize` option. | `nil` |
-| `quantize.query` | | Hash containing options for query portion of URL quantization. May include `:show` or `:exclude`. See options below. Option must be nested inside the `quantize` option. | `{}` |
-| `quantize.query.show` | | Defines which values should always be shown. May be an Array of strings, `:all` to show all values, or `nil` to show no values. Option must be nested inside the `query` option. | `nil` |
-| `quantize.query.exclude` | | Defines which values should be removed entirely. May be an Array of strings, `:all` to remove the query string entirely, or `nil` to exclude nothing. Option must be nested inside the `query` option. | `nil` |
-| `quantize.query.obfuscate` | | Defines query string redaction behaviour. May be a hash of options, `:internal` to use the default internal obfuscation settings, or `nil` to disable obfuscation. Note that obfuscation is a string-wise operation, not a key-value operation. When enabled, `query.show` defaults to `:all` if otherwise unset. Option must be nested inside the `query` option. | `nil` |
-| `quantize.query.obfuscate.with` | | Defines the string to replace obfuscated matches with. May be a String. Option must be nested inside the `query.obfuscate` option. | `''` |
-| `quantize.query.obfuscate.regex` | | Defines the regex with which the query string will be redacted. May be a Regexp, or `:internal` to use the default internal Regexp, which redacts well-known sensitive data. Each match is redacted entirely by replacing it with `query.obfuscate.with`. Option must be nested inside the `query.obfuscate` option. | `:internal` |
-| `quantize.fragment` | | Defines behavior for URL fragments. May be `:show` to show URL fragments, or `nil` to remove fragments. Option must be nested inside the `quantize` option. | `nil` |
-| `request_queuing` | `Bool` | Track HTTP request time spent in the queue of the frontend server. See [HTTP request queuing](#http-request-queuing) for setup details. | `false` |
-| `web_service_name` | `String` | Service name for frontend server request queuing spans. (e.g. `'nginx'`) | `'web-server'` |
+| Key | Env Var | Type | Description | Default |
+| -------------------------------- | - | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ |
+| `enabled` | `DD_TRACE_RACK_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `application` | | [`Rack Application`](https://github.com/rack/rack/blob/800e53fbe15b3424b7a8946b067bf6f2e648d5a8/SPEC.rdoc#label-Rack+applications) | Your Rack application. Required for `middleware_names`. | `nil` |
+| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
+| `headers` | | `Hash` | Hash of HTTP request or response headers to add as tags to the `rack.request`. Accepts `request` and `response` keys with Array values e.g. `['Last-Modified']`. Adds `http.request.headers.*` and `http.response.headers.*` tags respectively. This option overrides the global `DD_TRACE_HEADER_TAGS`, see [Applying header tags to root spans][header tags] for more information. | `{ response: ['Content-Type', 'X-Request-ID'] }` |
+| `middleware_names` | | `Bool` | Enable this if you want to use the last executed middleware class as the resource name for the `rack` span. If enabled alongside the `rails` instrumentation, `rails` takes precedence by setting the `rack` resource name to the active `rails` controller when applicable. Requires `application` option to use. | `false` |
+| `quantize` | | `Hash` | Hash containing options for quantization. May include `:query` or `:fragment`. | `{}` |
+| `quantize.base` | | | Defines behavior for URL base (scheme, host, port). May be `:show` to keep URL base in `http.url` tag and not set `http.base_url` tag, or `nil` to remove URL base from `http.url` tag by default, leaving a path and setting `http.base_url`. Option must be nested inside the `quantize` option. | `nil` |
+| `quantize.query` | | | Hash containing options for query portion of URL quantization. May include `:show` or `:exclude`. See options below. Option must be nested inside the `quantize` option. | `{}` |
+| `quantize.query.show` | | | Defines which values should always be shown. May be an Array of strings, `:all` to show all values, or `nil` to show no values. Option must be nested inside the `query` option. | `nil` |
+| `quantize.query.exclude` | | | Defines which values should be removed entirely. May be an Array of strings, `:all` to remove the query string entirely, or `nil` to exclude nothing. Option must be nested inside the `query` option. | `nil` |
+| `quantize.query.obfuscate` | | | Defines query string redaction behaviour. May be a hash of options, `:internal` to use the default internal obfuscation settings, or `nil` to disable obfuscation. Note that obfuscation is a string-wise operation, not a key-value operation. When enabled, `query.show` defaults to `:all` if otherwise unset. Option must be nested inside the `query` option. | `nil` |
+| `quantize.query.obfuscate.with` | | | Defines the string to replace obfuscated matches with. May be a String. Option must be nested inside the `query.obfuscate` option. | `''` |
+| `quantize.query.obfuscate.regex` | | | Defines the regex with which the query string will be redacted. May be a Regexp, or `:internal` to use the default internal Regexp, which redacts well-known sensitive data. Each match is redacted entirely by replacing it with `query.obfuscate.with`. Option must be nested inside the `query.obfuscate` option. | `:internal` |
+| `quantize.fragment` | | | Defines behavior for URL fragments. May be `:show` to show URL fragments, or `nil` to remove fragments. Option must be nested inside the `quantize` option. | `nil` |
+| `request_queuing` | | `Bool` | Track HTTP request time spent in the queue of the frontend server. See [HTTP request queuing](#http-request-queuing) for setup details. | `false` |
+| `web_service_name` | | `String` | Service name for frontend server request queuing spans. (e.g. `'nginx'`) | `'web-server'` |
Deprecation notice:
@@ -1408,14 +1467,15 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| --------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------- |
-| `distributed_tracing` | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
-| `request_queuing` | `Bool` | Track HTTP request time spent in the queue of the frontend server. See [HTTP request queuing](#http-request-queuing) for setup details. | `false` |
-| `middleware` | `Bool` | Add the trace middleware to the Rails application. Set to `false` if you don't want the middleware to load. | `true` |
-| `middleware_names` | `Bool` | Enables any short-circuited middleware requests to display the middleware name as a resource for the trace. | `false` |
-| `service_name` | `String` | Service name used when tracing application requests (on the `rack` level) | `''` (inferred from your Rails application namespace) |
-| `template_base_path` | `String` | Used when the template name is parsed. If you don't store your templates in the `views/` folder, you may need to change this value | `'views/'` |
+| Key | Env Var | Type | Description | Default |
+| --------------------- | - | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_RAILS_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
+| `request_queuing` | | `Bool` | Track HTTP request time spent in the queue of the frontend server. See [HTTP request queuing](#http-request-queuing) for setup details. | `false` |
+| `middleware` | | `Bool` | Add the trace middleware to the Rails application. Set to `false` if you don't want the middleware to load. | `true` |
+| `middleware_names` | | `Bool` | Enables any short-circuited middleware requests to display the middleware name as a resource for the trace. | `false` |
+| `service_name` | | `String` | Service name used when tracing application requests (on the `rack` level) | `''` (inferred from your Rails application namespace) |
+| `template_base_path` | | `String` | Used when the template name is parsed. If you don't store your templates in the `views/` folder, you may need to change this value | `'views/'` |
**Supported versions**
@@ -1455,12 +1515,13 @@ Rake::Task['my_task'].invoke
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------- | -------- | -------------------------------------------------------------------------------------------------------- | -------- |
-| `enabled` | `Bool` | Defines whether Rake tasks should be traced. Useful for temporarily disabling tracing. `true` or `false` | `true` |
-| `quantize` | `Hash` | Hash containing options for quantization of task arguments. See below for more details and examples. | `{}` |
-| `service_name` | `String` | Service name used for `rake` instrumentation | `'rake'` |
-| `tasks` | `Array` | Names of the Rake tasks to instrument | `[]` |
+| Key | Env Var | Type | Description | Default |
+| -------------- | - | ------- | -------------------------------------------------------------------------------------------------------- | -------- |
+| `enabled` | `DD_TRACE_RAKE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `quantize` | | `Hash` | Hash containing options for quantization of task arguments. See below for more details and examples. | `{}` |
+| `service_name` | | `String` | Service name used for `rake` instrumentation | `'rake'` |
+| `tasks` | | `Array` | Names of the Rake tasks to instrument | `[]` |
**Configuring task quantization behavior**
@@ -1517,6 +1578,7 @@ redis.set 'foo', 'bar'
| Key | Env Var | Type | Description | Default |
| -------------- | ----------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_REDIS_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_REDIS_SERVICE_NAME` | `String` | Name of application running the `redis` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `redis` |
| `peer_service` | `DD_TRACE_REDIS_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `command_args` | `DD_REDIS_COMMAND_ARGS` | `Bool` | Show the command arguments (for example, `key` in `GET key`) as resource name and tag. If `false`, only the command name is shown (for example, `GET`). | false |
@@ -1630,9 +1692,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ---------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `on_error` | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
+| Key | Env Var | Type | Description | Default |
+| ---------- | - | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_RESQUE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
### Rest Client
@@ -1651,6 +1714,7 @@ end
| Key | Env Var | Type | Description | Default |
| --------------------- | ----------------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
+| `enabled` | `DD_TRACE_REST_CLIENT_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_REST_CLIENT_SERVICE_NAME` | `String` | Name of application running the `rest_client` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `rest_client` |
| `peer_service` | `DD_TRACE_REST_CLIENT_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) | `true` |
@@ -1685,9 +1749,10 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------- | -------- | ---------------------------------------- | ------- |
-| `service_name` | `String` | Service name for `roda` instrumentation. | `nil` |
+| Key | Env Var | Type | Description | Default |
+| -------------- | - | ------- | ---------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_RODA_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `service_name` | | `String` | Service name for `roda` instrumentation. | `nil` |
### Sequel
@@ -1717,9 +1782,10 @@ articles.all
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| -------------- | -------- | ----------------------------------------- | ------------------------------------------ |
-| `service_name` | `String` | Service name for `sequel` instrumentation | Name of database adapter (e.g. `'mysql2'`) |
+| Key | Env Var | Type | Description | Default |
+| -------------- | - | ------- | ----------------------------------------- | ------------------------------------------ |
+| `enabled` | `DD_TRACE_SEQUEL_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `service_name` | | `String` | Service name for `sequel` instrumentation | Name of database adapter (e.g. `'mysql2'`) |
**Configuring databases to use different settings**
@@ -1750,10 +1816,11 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ---------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `tag_body` | `Bool` | Tag spans with the SQS message body `true` or `false` | `false` |
-| `on_error` | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
+| Key | Env Var | Type | Description | Default |
+| ---------- | - | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_SHORYUKEN_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `tag_body` | | `Bool` | Tag spans with the SQS message body `true` or `false` | `false` |
+| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
### Sidekiq
@@ -1771,11 +1838,12 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| --------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `distributed_tracing` | `Bool` | Enabling [distributed tracing](#distributed-tracing) creates a parent-child relationship between the `sidekiq.push` span and the `sidekiq.job` span.
**Important**: _Enabling distributed_tracing for asynchronous processing can result in drastic changes in your trace graph. Such cases include long running jobs, retried jobs, and jobs scheduled in the far future. Make sure to inspect your traces after enabling this feature._ | `false` |
-| `on_error` | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
-| `quantize` | `Hash` | Hash containing options for quantization of job arguments. | `{}` |
+| Key | Env Var | Type | Description | Default |
+| --------------------- | - | ----- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_SIDEKIQ_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `distributed_tracing` | | `Bool` | Enabling [distributed tracing](#distributed-tracing) creates a parent-child relationship between the `sidekiq.push` span and the `sidekiq.job` span.
**Important**: _Enabling distributed_tracing for asynchronous processing can result in drastic changes in your trace graph. Such cases include long running jobs, retried jobs, and jobs scheduled in the far future. Make sure to inspect your traces after enabling this feature._ | `false` |
+| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
+| `quantize` | | `Hash` | Hash containing options for quantization of job arguments. | `{}` |
### Sinatra
@@ -1827,11 +1895,12 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ----------------------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
-| `distributed_tracing` | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
-| `headers` | `Hash` | Hash of HTTP request or response headers to add as tags to the `sinatra.request`. Accepts `request` and `response` keys with Array values e.g. `['Last-Modified']`. Adds `http.request.headers.*` and `http.response.headers.*` tags respectively. This option overrides the global `DD_TRACE_HEADER_TAGS`, see [Applying header tags to root spans][header tags] for more information. | `{ response: ['Content-Type', 'X-Request-ID'] }` |
-| `resource_script_names` | `Bool` | Prepend resource names with script name | `false` |
+| Key | Env Var | Type | Description | Default |
+| ----------------------- | - | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
+| `enabled` | `DD_TRACE_SINATRA_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `distributed_tracing` | | `Bool` | Enables [distributed tracing](#distributed-tracing) so that this service trace is connected with a trace of another service if tracing headers are received | `true` |
+| `headers` | | `Hash` | Hash of HTTP request or response headers to add as tags to the `sinatra.request`. Accepts `request` and `response` keys with Array values e.g. `['Last-Modified']`. Adds `http.request.headers.*` and `http.response.headers.*` tags respectively. This option overrides the global `DD_TRACE_HEADER_TAGS`, see [Applying header tags to root spans][header tags] for more information. | `{ response: ['Content-Type', 'X-Request-ID'] }` |
+| `resource_script_names` | | `Bool` | Prepend resource names with script name | `false` |
### Sneakers
@@ -1849,11 +1918,11 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| ---------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
-| `enabled` | `Bool` | Defines whether Sneakers should be traced. Useful for temporarily disabling tracing. `true` or `false` | `true` |
-| `tag_body` | `Bool` | Enable tagging of job message. `true` for on, `false` for off. | `false` |
-| `on_error` | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
+| Key | Env Var | Type | Description | Default |
+| ---------- | - | ----- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| `enabled` | `DD_TRACE_SNEAKERS_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+| `tag_body` | | `Bool` | Enable tagging of job message. `true` for on, `false` for off. | `false` |
+| `on_error` | | `Proc` | Custom error handler invoked when a job raises an error. Provided `span` and `error` as arguments. Sets error on the span by default. Useful for ignoring transient errors. | `proc { \|span, error\| span.set_error(error) unless span.nil? }` |
### Stripe
@@ -1871,9 +1940,9 @@ end
`options` are the following keyword arguments:
-| Key | Type | Description | Default |
-| --------- | ------ | ---------------------------------------------------------------------------------------------------- | ------- |
-| `enabled` | `Bool` | Defines whether Stripe should be traced. Useful for temporarily disabling tracing. `true` or `false` | `true` |
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_STRIPE_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
### Sucker Punch
@@ -1883,13 +1952,19 @@ The `sucker_punch` integration traces all scheduled jobs:
require 'datadog'
Datadog.configure do |c|
- c.tracing.instrument :sucker_punch
+ c.tracing.instrument :sucker_punch, **options
end
# Execution of this job is traced
LogJob.perform_async('login')
```
+`options` are the following keyword arguments:
+
+| Key | Env Var | Type | Description | Default |
+| --------- | ------------------------------- | ------ | -------------------------------------------- | ------- |
+| `enabled` | `DD_TRACE_SUCKER_PUNCH_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
+
### Trilogy
The trilogy integration traces any SQL command sent through the `trilogy` gem.
@@ -1910,6 +1985,7 @@ client.query("SELECT * FROM users WHERE group='x'")
| Key | Env Var | Type | Description | Default |
| -------------- | ------------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
+| `enabled` | `DD_TRACE_TRILOGY_ENABLED` | `Bool` | Whether the integration should create spans. | `true` |
| `service_name` | `DD_TRACE_TRILOGY_SERVICE_NAME` | `String` | Name of application running the `trilogy` instrumentation. May be overridden by `global_default_service_name`. [See _Additional Configuration_ for more details](#additional-configuration) | `trilogy` |
| `peer_service` | `DD_TRACE_TRILOGY_PEER_SERVICE` | `String` | Name of external service the application connects to | `nil` |
diff --git a/lib/datadog/core/configuration.rb b/lib/datadog/core/configuration.rb
index bcaa6432564..19d3ff27110 100644
--- a/lib/datadog/core/configuration.rb
+++ b/lib/datadog/core/configuration.rb
@@ -84,23 +84,16 @@ def configure
configuration = self.configuration
yield(configuration)
- built_components = false
-
- components = safely_synchronize do |write_components|
+ safely_synchronize do |write_components|
write_components.call(
if components?
replace_components!(configuration, @components)
else
- components = build_components(configuration)
- built_components = true
- components
+ build_components(configuration)
end
)
end
- # Should only be called the first time components are built
- components.telemetry.started! if built_components
-
configuration
end
@@ -200,20 +193,13 @@ def components(allow_initialization: true)
current_components = COMPONENTS_READ_LOCK.synchronize { defined?(@components) && @components }
return current_components if current_components || !allow_initialization
- built_components = false
-
- components = safely_synchronize do |write_components|
+ safely_synchronize do |write_components|
if defined?(@components) && @components
@components
else
- built_components = true
write_components.call(build_components(configuration))
end
end
-
- # Should only be called the first time components are built
- components&.telemetry&.started! if built_components
- components
end
private
diff --git a/lib/datadog/core/configuration/components.rb b/lib/datadog/core/configuration/components.rb
index f7e00d7a981..665485c6f0c 100644
--- a/lib/datadog/core/configuration/components.rb
+++ b/lib/datadog/core/configuration/components.rb
@@ -6,7 +6,7 @@
require_relative '../diagnostics/health'
require_relative '../logger'
require_relative '../runtime/metrics'
-require_relative '../telemetry/client'
+require_relative '../telemetry/component'
require_relative '../workers/runtime_metrics'
require_relative '../remote/component'
@@ -62,7 +62,7 @@ def build_telemetry(settings, agent_settings, logger)
logger.debug { "Telemetry disabled. Agent network adapter not supported: #{agent_settings.adapter}" }
end
- Telemetry::Client.new(
+ Telemetry::Component.new(
enabled: enabled,
heartbeat_interval_seconds: settings.telemetry.heartbeat_interval_seconds,
dependency_collection: settings.telemetry.dependency_collection
@@ -169,8 +169,9 @@ def shutdown!(replacement = nil)
unused_statsd = (old_statsd - (old_statsd & new_statsd))
unused_statsd.each(&:close)
- telemetry.stop!
+ # enqueue closing event before stopping telemetry so it will be sent out on shutdown
telemetry.emit_closing! unless replacement
+ telemetry.stop!
end
end
end
diff --git a/lib/datadog/core/telemetry/client.rb b/lib/datadog/core/telemetry/client.rb
deleted file mode 100644
index 172145c9342..00000000000
--- a/lib/datadog/core/telemetry/client.rb
+++ /dev/null
@@ -1,95 +0,0 @@
-# frozen_string_literal: true
-
-require_relative 'emitter'
-require_relative 'event'
-require_relative 'heartbeat'
-require_relative '../utils/forking'
-
-module Datadog
- module Core
- module Telemetry
- # Telemetry entrypoint, coordinates sending telemetry events at various points in app lifecycle.
- class Client
- attr_reader \
- :enabled,
- :unsupported
-
- include Core::Utils::Forking
-
- # @param enabled [Boolean] Determines whether telemetry events should be sent to the API
- # @param heartbeat_interval_seconds [Float] How frequently heartbeats will be reported, in seconds.
- # @param [Boolean] dependency_collection Whether to send the `app-dependencies-loaded` event
- def initialize(heartbeat_interval_seconds:, dependency_collection:, enabled: true)
- @enabled = enabled
- @emitter = Emitter.new
- @stopped = false
- @unsupported = false
- @started = false
- @dependency_collection = dependency_collection
-
- @worker = Telemetry::Heartbeat.new(enabled: @enabled, heartbeat_interval_seconds: heartbeat_interval_seconds) do
- next unless @started # `started!` should be the first event, thus ensure that `heartbeat!` is not sent first.
-
- heartbeat!
- end
- end
-
- def disable!
- @enabled = false
- @worker.enabled = false
- end
-
- def started!
- return if !@enabled || forked?
-
- res = @emitter.request(Event::AppStarted.new)
-
- if res.not_found? # Telemetry is only supported by agent versions 7.34 and up
- Datadog.logger.debug('Agent does not support telemetry; disabling future telemetry events.')
- disable!
- @unsupported = true # Prevent telemetry from getting re-enabled
- return res
- end
-
- @emitter.request(Event::AppDependenciesLoaded.new) if @dependency_collection
-
- @started = true
- end
-
- def emit_closing!
- return if !@enabled || forked?
-
- @emitter.request(Event::AppClosing.new)
- end
-
- def stop!
- return if @stopped
-
- @worker.stop(true, 0)
- @stopped = true
- end
-
- def integrations_change!
- return if !@enabled || forked?
-
- @emitter.request(Event::AppIntegrationsChange.new)
- end
-
- # Report configuration changes caused by Remote Configuration.
- def client_configuration_change!(changes)
- return if !@enabled || forked?
-
- @emitter.request(Event::AppClientConfigurationChange.new(changes, 'remote_config'))
- end
-
- private
-
- def heartbeat!
- return if !@enabled || forked?
-
- @emitter.request(Event::AppHeartbeat.new)
- end
- end
- end
- end
-end
diff --git a/lib/datadog/core/telemetry/component.rb b/lib/datadog/core/telemetry/component.rb
new file mode 100644
index 00000000000..0d5046e4391
--- /dev/null
+++ b/lib/datadog/core/telemetry/component.rb
@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+require_relative 'emitter'
+require_relative 'event'
+require_relative 'worker'
+require_relative '../utils/forking'
+
+module Datadog
+ module Core
+ module Telemetry
+ # Telemetry entrypoint, coordinates sending telemetry events at various points in app lifecycle.
+ class Component
+ attr_reader :enabled
+
+ include Core::Utils::Forking
+
+ # @param enabled [Boolean] Determines whether telemetry events should be sent to the API
+ # @param heartbeat_interval_seconds [Float] How frequently heartbeats will be reported, in seconds.
+ # @param dependency_collection [Boolean] Whether to send the `app-dependencies-loaded` event
+ def initialize(heartbeat_interval_seconds:, dependency_collection:, enabled: true)
+ @enabled = enabled
+ @stopped = false
+
+ @worker = Telemetry::Worker.new(
+ enabled: @enabled,
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ emitter: Emitter.new,
+ dependency_collection: dependency_collection
+ )
+ @worker.start
+ end
+
+ def disable!
+ @enabled = false
+ @worker.enabled = false
+ end
+
+ def stop!
+ return if @stopped
+
+ @worker.stop(true)
+ @stopped = true
+ end
+
+ def emit_closing!
+ return if !@enabled || forked?
+
+ @worker.enqueue(Event::AppClosing.new)
+ end
+
+ def integrations_change!
+ return if !@enabled || forked?
+
+ @worker.enqueue(Event::AppIntegrationsChange.new)
+ end
+
+ # Report configuration changes caused by Remote Configuration.
+ def client_configuration_change!(changes)
+ return if !@enabled || forked?
+
+ @worker.enqueue(Event::AppClientConfigurationChange.new(changes, 'remote_config'))
+ end
+ end
+ end
+ end
+end
diff --git a/lib/datadog/core/telemetry/event.rb b/lib/datadog/core/telemetry/event.rb
index 49b292878bb..60a9886eb1e 100644
--- a/lib/datadog/core/telemetry/event.rb
+++ b/lib/datadog/core/telemetry/event.rb
@@ -286,6 +286,33 @@ def type
'app-closing'
end
end
+
+ # Telemetry class for the 'generate-metrics' event
+ class GenerateMetrics < Base
+ def type
+ 'generate-metrics'
+ end
+
+ def initialize(namespace, metric_series)
+ super()
+ @namespace = namespace
+ @metric_series = metric_series
+ end
+
+ def payload(_)
+ {
+ namespace: @namespace,
+ series: @metric_series.map(&:to_h)
+ }
+ end
+ end
+
+ # Telemetry class for the 'distributions' event
+ class Distributions < GenerateMetrics
+ def type
+ 'distributions'
+ end
+ end
end
end
end
diff --git a/lib/datadog/core/telemetry/heartbeat.rb b/lib/datadog/core/telemetry/heartbeat.rb
deleted file mode 100644
index b2129504e68..00000000000
--- a/lib/datadog/core/telemetry/heartbeat.rb
+++ /dev/null
@@ -1,33 +0,0 @@
-# frozen_string_literal: true
-
-require_relative '../worker'
-require_relative '../workers/polling'
-
-module Datadog
- module Core
- module Telemetry
- # Periodically sends a heartbeat event to the telemetry API.
- class Heartbeat < Core::Worker
- include Core::Workers::Polling
-
- def initialize(heartbeat_interval_seconds:, enabled: true, &block)
- # Workers::Polling settings
- self.enabled = enabled
- # Workers::IntervalLoop settings
- self.loop_base_interval = heartbeat_interval_seconds
- self.fork_policy = Core::Workers::Async::Thread::FORK_POLICY_STOP
- super(&block)
- start
- end
-
- def loop_wait_before_first_iteration?; end
-
- private
-
- def start
- perform
- end
- end
- end
- end
-end
diff --git a/lib/datadog/core/telemetry/http/adapters/net.rb b/lib/datadog/core/telemetry/http/adapters/net.rb
index e49b321c436..3aa65e6d49d 100644
--- a/lib/datadog/core/telemetry/http/adapters/net.rb
+++ b/lib/datadog/core/telemetry/http/adapters/net.rb
@@ -15,7 +15,7 @@ class Net
:timeout,
:ssl
- DEFAULT_TIMEOUT = 30
+ DEFAULT_TIMEOUT = 2
def initialize(hostname:, port: nil, timeout: DEFAULT_TIMEOUT, ssl: true)
@hostname = hostname
diff --git a/lib/datadog/core/telemetry/metric.rb b/lib/datadog/core/telemetry/metric.rb
new file mode 100644
index 00000000000..2c6ba8d97de
--- /dev/null
+++ b/lib/datadog/core/telemetry/metric.rb
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+module Datadog
+ module Core
+ module Telemetry
+ # Telemetry metrics data model (internal Datadog metrics for client libraries)
+ module Metric
+ def self.metric_id(type, name, tags = [])
+ "#{type}::#{name}::#{tags.join(',')}"
+ end
+
+ # Base class for all metric types
+ class Base
+ attr_reader :name, :tags, :values, :common, :interval
+
+ # @param name [String] metric name
+ # @param tags [Array|Hash{String=>String}] metric tags as hash of array of "tag:val" strings
+ # @param common [Boolean] true if the metric is common for all languages, false for Ruby-specific metric
+ # @param interval [Integer] metrics aggregation interval in seconds
+ def initialize(name, tags: {}, common: true, interval: nil)
+ @name = name
+ @values = []
+ @tags = tags_to_array(tags)
+ @common = common
+ @interval = interval
+ end
+
+ def track(value); end
+
+ def type; end
+
+ def to_h
+ # @type var res: Hash[Symbol, untyped]
+ res = {
+ metric: name,
+ points: values,
+ type: type,
+ tags: tags,
+ common: common
+ }
+ res[:interval] = interval if interval
+ res
+ end
+
+ private
+
+ def tags_to_array(tags)
+ return tags if tags.is_a?(Array)
+
+ tags.map { |k, v| "#{k}:#{v}" }
+ end
+ end
+
+ # Count metric adds up all the submitted values in a time interval. This would be suitable for a
+ # metric tracking the number of website hits, for instance.
+ class Count < Base
+ TYPE = 'count'
+
+ def type
+ TYPE
+ end
+
+ def inc(value = 1)
+ track(value)
+ end
+
+ def dec(value = 1)
+ track(-value)
+ end
+
+ def track(value)
+ if values.empty?
+ values << [Time.now.to_i, value]
+ else
+ values[0][0] = Time.now.to_i
+ values[0][1] += value
+ end
+ end
+ end
+
+ # A gauge type takes the last value reported during the interval. This type would make sense for tracking RAM or
+ # CPU usage, where taking the last value provides a representative picture of the host’s behavior during the time
+ # interval.
+ class Gauge < Base
+ TYPE = 'gauge'
+
+ def type
+ TYPE
+ end
+
+ def track(value)
+ if values.empty?
+ values << [Time.now.to_i, value]
+ else
+ values[0][0] = Time.now.to_i
+ values[0][1] = value
+ end
+ end
+ end
+
+ # The rate type takes the count and divides it by the length of the time interval. This is useful if you’re
+ # interested in the number of hits per second.
+ class Rate < Base
+ TYPE = 'rate'
+
+ def initialize(name, tags: {}, common: true, interval: nil)
+ super
+
+ @value = 0.0
+ end
+
+ def type
+ TYPE
+ end
+
+ def track(value = 1.0)
+ @value += value
+
+ rate = interval ? @value / interval : 0.0
+ @values = [[Time.now.to_i, rate]]
+ end
+ end
+
+ # Distribution metric represents the global statistical distribution of a set of values.
+ class Distribution < Base
+ TYPE = 'distributions'
+
+ def type
+ TYPE
+ end
+
+ def track(value)
+ values << value
+ end
+
+ # distribution metric data does not have a type field
+ def to_h
+ {
+ metric: name,
+ points: values,
+ tags: tags,
+ common: common
+ }
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/lib/datadog/core/telemetry/worker.rb b/lib/datadog/core/telemetry/worker.rb
new file mode 100644
index 00000000000..57633251ac7
--- /dev/null
+++ b/lib/datadog/core/telemetry/worker.rb
@@ -0,0 +1,156 @@
+# frozen_string_literal: true
+
+require_relative 'event'
+
+require_relative '../utils/only_once_successful'
+require_relative '../workers/polling'
+require_relative '../workers/queue'
+
+module Datadog
+ module Core
+ module Telemetry
+ # Accumulates events and sends them to the API at a regular interval, including the heartbeat event.
+ class Worker
+ include Core::Workers::Queue
+ include Core::Workers::Polling
+
+ DEFAULT_BUFFER_MAX_SIZE = 1000
+ APP_STARTED_EVENT_RETRIES = 10
+
+ TELEMETRY_STARTED_ONCE = Utils::OnlyOnceSuccessful.new(APP_STARTED_EVENT_RETRIES)
+
+ def initialize(
+ heartbeat_interval_seconds:,
+ emitter:,
+ dependency_collection:,
+ enabled: true,
+ shutdown_timeout: Workers::Polling::DEFAULT_SHUTDOWN_TIMEOUT,
+ buffer_size: DEFAULT_BUFFER_MAX_SIZE
+ )
+ @emitter = emitter
+ @dependency_collection = dependency_collection
+
+ # Workers::Polling settings
+ self.enabled = enabled
+ # Workers::IntervalLoop settings
+ self.loop_base_interval = heartbeat_interval_seconds
+ self.fork_policy = Core::Workers::Async::Thread::FORK_POLICY_STOP
+
+ @shutdown_timeout = shutdown_timeout
+ @buffer_size = buffer_size
+
+ self.buffer = buffer_klass.new(@buffer_size)
+ end
+
+ def start
+ return if !enabled? || forked?
+
+ # starts async worker
+ perform
+ end
+
+ def stop(force_stop = false, timeout = @shutdown_timeout)
+ buffer.close if running?
+
+ flush_events(dequeue) if work_pending?
+
+ super
+ end
+
+ def enqueue(event)
+ return if !enabled? || forked?
+
+ buffer.push(event)
+ end
+
+ def sent_started_event?
+ TELEMETRY_STARTED_ONCE.success?
+ end
+
+ def failed_to_start?
+ TELEMETRY_STARTED_ONCE.failed?
+ end
+
+ private
+
+ def perform(*events)
+ return if !enabled? || forked?
+
+ started! unless sent_started_event?
+
+ heartbeat!
+
+ flush_events(events)
+ end
+
+ def flush_events(events)
+ return if events.nil?
+ return if !enabled? || !sent_started_event?
+
+ Datadog.logger.debug { "Sending #{events&.count} telemetry events" }
+ events.each do |event|
+ send_event(event)
+ end
+ end
+
+ def heartbeat!
+ return if !enabled? || !sent_started_event?
+
+ send_event(Event::AppHeartbeat.new)
+ end
+
+ def started!
+ return unless enabled?
+
+ if failed_to_start?
+ Datadog.logger.debug('Telemetry app-started event exhausted retries, disabling telemetry worker')
+ self.enabled = false
+ return
+ end
+
+ TELEMETRY_STARTED_ONCE.run do
+ res = send_event(Event::AppStarted.new)
+
+ if res.ok?
+ Datadog.logger.debug('Telemetry app-started event is successfully sent')
+
+ send_event(Event::AppDependenciesLoaded.new) if @dependency_collection
+
+ true
+ else
+ Datadog.logger.debug('Error sending telemetry app-started event, retry after heartbeat interval...')
+ false
+ end
+ end
+ end
+
+ def send_event(event)
+ res = @emitter.request(event)
+
+ disable_on_not_found!(res)
+
+ res
+ end
+
+ def dequeue
+ buffer.pop
+ end
+
+ def buffer_klass
+ if Core::Environment::Ext::RUBY_ENGINE == 'ruby'
+ Core::Buffer::CRuby
+ else
+ Core::Buffer::ThreadSafe
+ end
+ end
+
+ def disable_on_not_found!(response)
+ return unless response.not_found?
+
+ Datadog.logger.debug('Agent does not support telemetry; disabling future telemetry events.')
+ self.enabled = false
+ end
+ end
+ end
+ end
+end
diff --git a/lib/datadog/core/utils/only_once_successful.rb b/lib/datadog/core/utils/only_once_successful.rb
new file mode 100644
index 00000000000..ed8b4141963
--- /dev/null
+++ b/lib/datadog/core/utils/only_once_successful.rb
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+require_relative 'only_once'
+
+module Datadog
+ module Core
+ module Utils
+ # Helper class to execute something with only one success.
+ #
+ # This is useful for cases where we want to ensure that a block of code is only executed once, and only if it
+ # succeeds. One such example is sending the app-started telemetry event.
+ #
+ # Successful execution is determined by the return value of the block: any truthy value is considered success.
+ #
+ # Thread-safe when used correctly (e.g. be careful of races when lazily initializing instances of this class).
+ #
+ # Note: In its current state, this class is not Ractor-safe.
+ # In https://github.com/DataDog/dd-trace-rb/pull/1398#issuecomment-797378810 we have a discussion of alternatives,
+ # including an alternative implementation that is Ractor-safe once spent.
+ class OnlyOnceSuccessful < OnlyOnce
+ def initialize(limit = 0)
+ super()
+
+ @limit = limit
+ @failed = false
+ @retries = 0
+ end
+
+ def run
+ @mutex.synchronize do
+ return if @ran_once
+
+ result = yield
+ @ran_once = !!result
+
+ if !@ran_once && limited?
+ @retries += 1
+ check_limit!
+ end
+
+ result
+ end
+ end
+
+ def success?
+ @mutex.synchronize { @ran_once && !@failed }
+ end
+
+ def failed?
+ @mutex.synchronize { @ran_once && @failed }
+ end
+
+ private
+
+ def check_limit!
+ if @retries >= @limit
+ @failed = true
+ @ran_once = true
+ end
+ end
+
+ def limited?
+ !@limit.nil? && @limit.positive?
+ end
+
+ def reset_ran_once_state_for_tests
+ @mutex.synchronize do
+ @ran_once = false
+ @failed = false
+ @retries = 0
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/sig/datadog/core/telemetry/client.rbs b/sig/datadog/core/telemetry/component.rbs
similarity index 57%
rename from sig/datadog/core/telemetry/client.rbs
rename to sig/datadog/core/telemetry/component.rbs
index 9bd2f4a97cc..4d411d32578 100644
--- a/sig/datadog/core/telemetry/client.rbs
+++ b/sig/datadog/core/telemetry/component.rbs
@@ -1,38 +1,26 @@
module Datadog
module Core
module Telemetry
- class Client
+ class Component
@enabled: bool
- @dependency_collection: bool
- @started: bool
@stopped: bool
- @emitter: Datadog::Core::Telemetry::Emitter
- @unsupported: bool
- @worker: Datadog::Core::Telemetry::Heartbeat
+ @worker: Datadog::Core::Telemetry::Worker
attr_reader enabled: bool
- attr_reader unsupported: bool
-
include Core::Utils::Forking
- def initialize: (heartbeat_interval_seconds: Numeric, dependency_collection: bool, enabled: bool) -> void
+ def initialize: (heartbeat_interval_seconds: Numeric, dependency_collection: bool, ?enabled: bool) -> void
def disable!: () -> void
def client_configuration_change!: (Enumerable[[String, Numeric | bool | String]] changes) -> void
- def started!: () -> void
-
def emit_closing!: () -> void
def stop!: () -> void
def integrations_change!: () -> void
-
- private
-
- def heartbeat!: () -> void
end
end
end
diff --git a/sig/datadog/core/telemetry/event.rbs b/sig/datadog/core/telemetry/event.rbs
index 4e0e3824109..791f014f9a4 100644
--- a/sig/datadog/core/telemetry/event.rbs
+++ b/sig/datadog/core/telemetry/event.rbs
@@ -55,6 +55,16 @@ module Datadog
class AppClosing < Base
end
+
+ class GenerateMetrics < Base
+ @namespace: String
+ @metric_series: Enumerable[Datadog::Core::Telemetry::Metric::Base]
+
+ def initialize: (String namespace, Enumerable[Datadog::Core::Telemetry::Metric::Base] metric_series) -> void
+ end
+
+ class Distributions < GenerateMetrics
+ end
end
end
end
diff --git a/sig/datadog/core/telemetry/heartbeat.rbs b/sig/datadog/core/telemetry/heartbeat.rbs
deleted file mode 100644
index b89aeedca8b..00000000000
--- a/sig/datadog/core/telemetry/heartbeat.rbs
+++ /dev/null
@@ -1,20 +0,0 @@
-module Datadog
- module Core
- module Telemetry
- class Heartbeat < Core::Worker
- include Core::Workers::Polling
- include Core::Workers::Async::Thread
- include Core::Workers::Async::Thread::PrependedMethods
- include Core::Workers::IntervalLoop
-
- def initialize: (?enabled: bool, heartbeat_interval_seconds: Numeric) ?{ () -> void } -> void
-
- def loop_wait_before_first_iteration?: () -> bool?
-
- private
-
- def start: () -> void
- end
- end
- end
-end
diff --git a/sig/datadog/core/telemetry/http/adapters/net.rbs b/sig/datadog/core/telemetry/http/adapters/net.rbs
index 5cf50e53adf..311c5989f95 100644
--- a/sig/datadog/core/telemetry/http/adapters/net.rbs
+++ b/sig/datadog/core/telemetry/http/adapters/net.rbs
@@ -14,7 +14,7 @@ module Datadog
attr_reader ssl: bool
- DEFAULT_TIMEOUT: 30
+ DEFAULT_TIMEOUT: 2
def initialize: (hostname: String, ?port: Integer?, ?timeout: Float | Integer, ?ssl: bool?) -> void
diff --git a/sig/datadog/core/telemetry/http/ext.rbs b/sig/datadog/core/telemetry/http/ext.rbs
index 22cea7d1fd0..11271822da3 100644
--- a/sig/datadog/core/telemetry/http/ext.rbs
+++ b/sig/datadog/core/telemetry/http/ext.rbs
@@ -21,7 +21,7 @@ module Datadog
CONTENT_TYPE_APPLICATION_JSON: "application/json"
- API_VERSION: "v1"
+ API_VERSION: "v2"
AGENT_ENDPOINT: "/telemetry/proxy/api/v2/apmtelemetry"
end
diff --git a/sig/datadog/core/telemetry/metric.rbs b/sig/datadog/core/telemetry/metric.rbs
new file mode 100644
index 00000000000..3eda8cda131
--- /dev/null
+++ b/sig/datadog/core/telemetry/metric.rbs
@@ -0,0 +1,102 @@
+module Datadog
+ module Core
+ module Telemetry
+ module Metric
+ type metric_type = "count" | "gauge" | "rate" | "distributions" | nil
+
+ type input_value = Integer | Float
+
+ type metric_value = Array[input_value]
+ type distribution_value = input_value
+
+ type tags_input = ::Hash[String, String] | Array[String]
+
+ def self.metric_id: (metric_type type, String name, ?Array[String] tags) -> ::String
+
+ class Base
+ @name: String
+
+ @values: Array[untyped]
+
+ @tags: Array[String]
+
+ @common: bool
+
+ @interval: Integer?
+
+ attr_reader name: String
+
+ attr_reader tags: Array[String]
+
+ attr_reader values: Array[untyped]
+
+ attr_reader common: bool
+
+ attr_reader interval: Integer?
+
+ def initialize: (String name, ?tags: tags_input, ?common: bool, ?interval: Integer?) -> void
+
+ def track: (Numeric value) -> void
+
+ def type: () -> metric_type
+
+ def to_h: () -> Hash[Symbol, untyped]
+
+ private
+
+ def tags_to_array: (tags_input tags) -> Array[String]
+ end
+
+ class Count < Base
+ TYPE: "count"
+
+ @values: Array[metric_value]
+ attr_reader values: Array[metric_value]
+
+ def type: () -> "count"
+
+ def inc: (?::Integer value) -> void
+
+ def dec: (?::Integer value) -> void
+
+ def track: (Integer value) -> void
+ end
+
+ class Gauge < Base
+ TYPE: "gauge"
+
+ def type: () -> "gauge"
+
+ def track: (input_value value) -> void
+ end
+
+ class Rate < Base
+ @value: Float
+
+ @values: Array[metric_value]
+ attr_reader values: Array[metric_value]
+
+ TYPE: "rate"
+
+ def initialize: (String name, ?tags: tags_input, ?common: bool, ?interval: Integer?) -> void
+
+ def type: () -> "rate"
+
+ def track: (?::Float value) -> void
+ end
+
+ class Distribution < Base
+ TYPE: "distributions"
+
+ @values: Array[distribution_value]
+ attr_reader values: Array[distribution_value]
+
+ def type: () -> "distributions"
+
+ def track: (input_value value) -> void
+ def to_h: () -> { metric: String, points: Array[distribution_value], tags: Array[String], common: bool }
+ end
+ end
+ end
+ end
+end
diff --git a/sig/datadog/core/telemetry/worker.rbs b/sig/datadog/core/telemetry/worker.rbs
new file mode 100644
index 00000000000..822b9fece95
--- /dev/null
+++ b/sig/datadog/core/telemetry/worker.rbs
@@ -0,0 +1,49 @@
+module Datadog
+ module Core
+ module Telemetry
+ class Worker
+ include Core::Workers::Polling
+ include Core::Workers::Async::Thread
+ include Core::Workers::Async::Thread::PrependedMethods
+ include Core::Workers::IntervalLoop
+ include Core::Workers::Queue
+
+ TELEMETRY_STARTED_ONCE: Datadog::Core::Utils::OnlyOnceSuccessful
+ APP_STARTED_EVENT_RETRIES: 10
+ DEFAULT_BUFFER_MAX_SIZE: 1000
+
+ @emitter: Emitter
+ @sent_started_event: bool
+ @shutdown_timeout: Integer
+ @buffer_size: Integer
+ @dependency_collection: bool
+
+ def initialize: (?enabled: bool, heartbeat_interval_seconds: Numeric, emitter: Emitter, ?shutdown_timeout: Integer, ?buffer_size: Integer, dependency_collection: bool) -> void
+
+ def start: () -> void
+
+ def sent_started_event?: () -> bool
+
+ def failed_to_start?: () -> bool
+
+ def enqueue: (Event::Base event) -> void
+
+ def dequeue: () -> Array[Event::Base]
+
+ private
+
+ def heartbeat!: () -> void
+
+ def started!: () -> void
+
+ def flush_events: (Array[Event::Base] events) -> void
+
+ def send_event: (Event::Base event) -> Datadog::Core::Telemetry::Http::Adapters::Net::Response
+
+ def disable_on_not_found!: (Datadog::Core::Telemetry::Http::Adapters::Net::Response response) -> void
+
+ def buffer_klass: () -> untyped
+ end
+ end
+ end
+end
diff --git a/sig/datadog/core/utils/only_once.rbs b/sig/datadog/core/utils/only_once.rbs
index 324b1ba72e7..354334c71e1 100644
--- a/sig/datadog/core/utils/only_once.rbs
+++ b/sig/datadog/core/utils/only_once.rbs
@@ -2,6 +2,9 @@ module Datadog
module Core
module Utils
class OnlyOnce
+ @ran_once: bool
+ @mutex: Thread::Mutex
+
def initialize: () -> untyped
def run: () { () -> untyped } -> untyped
diff --git a/sig/datadog/core/utils/only_once_successful.rbs b/sig/datadog/core/utils/only_once_successful.rbs
new file mode 100644
index 00000000000..2236b5a66b5
--- /dev/null
+++ b/sig/datadog/core/utils/only_once_successful.rbs
@@ -0,0 +1,23 @@
+module Datadog
+ module Core
+ module Utils
+ class OnlyOnceSuccessful < Datadog::Core::Utils::OnlyOnce
+ @limit: Integer
+ @retries: Integer
+ @failed: bool
+
+ def initialize: (?Integer limit) -> void
+
+ def success?: () -> bool
+
+ def failed?: () -> bool
+
+ private
+
+ def check_limit!: () -> void
+
+ def limited?: () -> bool
+ end
+ end
+ end
+end
diff --git a/sig/datadog/core/workers/polling.rbs b/sig/datadog/core/workers/polling.rbs
index 7f4d8f9c55b..43c1360a92c 100644
--- a/sig/datadog/core/workers/polling.rbs
+++ b/sig/datadog/core/workers/polling.rbs
@@ -2,7 +2,7 @@ module Datadog
module Core
module Workers
module Polling
- SHUTDOWN_TIMEOUT: 1
+ DEFAULT_SHUTDOWN_TIMEOUT: 1
def self.included: (Class | Module base) -> void
diff --git a/spec/datadog/core/configuration/components_spec.rb b/spec/datadog/core/configuration/components_spec.rb
index e04fad93fcf..79c65637f91 100644
--- a/spec/datadog/core/configuration/components_spec.rb
+++ b/spec/datadog/core/configuration/components_spec.rb
@@ -7,7 +7,7 @@
require 'datadog/core/diagnostics/environment_logger'
require 'datadog/core/diagnostics/health'
require 'datadog/core/logger'
-require 'datadog/core/telemetry/client'
+require 'datadog/core/telemetry/component'
require 'datadog/core/runtime/metrics'
require 'datadog/core/workers/runtime_metrics'
require 'datadog/statsd'
@@ -33,7 +33,7 @@
let(:profiler_setup_task) { Datadog::Profiling.supported? ? instance_double(Datadog::Profiling::Tasks::Setup) : nil }
let(:remote) { instance_double(Datadog::Core::Remote::Component, start: nil, shutdown!: nil) }
- let(:telemetry) { instance_double(Datadog::Core::Telemetry::Client) }
+ let(:telemetry) { instance_double(Datadog::Core::Telemetry::Component) }
let(:environment_logger_extra) { { hello: 123, world: '456' } }
@@ -46,7 +46,7 @@
end
allow(Datadog::Statsd).to receive(:new) { instance_double(Datadog::Statsd) }
allow(Datadog::Core::Remote::Component).to receive(:new).and_return(remote)
- allow(Datadog::Core::Telemetry::Client).to receive(:new).and_return(telemetry)
+ allow(Datadog::Core::Telemetry::Component).to receive(:new).and_return(telemetry)
end
around do |example|
@@ -223,7 +223,7 @@
let(:logger) { instance_double(Logger) }
context 'given settings' do
- let(:telemetry_client) { instance_double(Datadog::Core::Telemetry::Client) }
+ let(:telemetry) { instance_double(Datadog::Core::Telemetry::Component) }
let(:expected_options) do
{ enabled: enabled, heartbeat_interval_seconds: heartbeat_interval_seconds,
dependency_collection: dependency_collection }
@@ -233,16 +233,16 @@
let(:dependency_collection) { true }
before do
- expect(Datadog::Core::Telemetry::Client).to receive(:new).with(expected_options).and_return(telemetry_client)
+ expect(Datadog::Core::Telemetry::Component).to receive(:new).with(expected_options).and_return(telemetry)
allow(settings.telemetry).to receive(:enabled).and_return(enabled)
end
- it { is_expected.to be(telemetry_client) }
+ it { is_expected.to be(telemetry) }
context 'with :enabled true' do
let(:enabled) { double('enabled') }
- it { is_expected.to be(telemetry_client) }
+ it { is_expected.to be(telemetry) }
context 'and :unix agent adapter' do
let(:expected_options) do
@@ -255,7 +255,7 @@
it 'does not enable telemetry for unsupported non-http transport' do
expect(logger).to receive(:debug)
- is_expected.to be(telemetry_client)
+ is_expected.to be(telemetry)
end
end
end
@@ -1108,7 +1108,7 @@
let(:runtime_metrics) { instance_double(Datadog::Core::Runtime::Metrics, statsd: statsd) }
let(:health_metrics) { instance_double(Datadog::Core::Diagnostics::Health::Metrics, statsd: statsd) }
let(:statsd) { instance_double(::Datadog::Statsd) }
- let(:telemetry) { instance_double(Datadog::Core::Telemetry::Client) }
+ let(:telemetry) { instance_double(Datadog::Core::Telemetry::Component) }
before do
allow(replacement).to receive(:tracer).and_return(tracer)
diff --git a/spec/datadog/core/configuration_spec.rb b/spec/datadog/core/configuration_spec.rb
index 7873d262f70..51a2b44cb01 100644
--- a/spec/datadog/core/configuration_spec.rb
+++ b/spec/datadog/core/configuration_spec.rb
@@ -8,13 +8,12 @@
RSpec.describe Datadog::Core::Configuration do
let(:default_log_level) { ::Logger::INFO }
- let(:telemetry_client) { instance_double(Datadog::Core::Telemetry::Client) }
+ let(:telemetry) { instance_double(Datadog::Core::Telemetry::Component) }
before do
- allow(telemetry_client).to receive(:started!)
- allow(telemetry_client).to receive(:stop!)
- allow(telemetry_client).to receive(:emit_closing!)
- allow(Datadog::Core::Telemetry::Client).to receive(:new).and_return(telemetry_client)
+ allow(telemetry).to receive(:stop!)
+ allow(telemetry).to receive(:emit_closing!)
+ allow(Datadog::Core::Telemetry::Component).to receive(:new).and_return(telemetry)
allow(Datadog::Core::Remote::Component).to receive(:build)
end
@@ -41,10 +40,6 @@
end
it do
- # We cannot mix `expect().to_not` with `expect().to(...).ordered`.
- # One way around that is to force the method to raise an error if it's ever called.
- allow(telemetry_client).to receive(:started!).and_raise('Should not be called')
-
# Components should have changed
expect { configure }
.to change { test_class.send(:components) }
@@ -84,7 +79,6 @@
.with(test_class.configuration)
expect(new_components).to_not have_received(:shutdown!)
- expect(telemetry_client).to have_received(:started!)
end
end
end
@@ -501,8 +495,6 @@
describe '#components' do
context 'when components are not initialized' do
it 'initializes the components' do
- expect(telemetry_client).to receive(:started!)
-
test_class.send(:components)
expect(test_class.send(:components?)).to be true
@@ -510,8 +502,6 @@
context 'when allow_initialization is false' do
it 'does not initialize the components' do
- expect(telemetry_client).to_not receive(:started!)
-
test_class.send(:components, allow_initialization: false)
expect(test_class.send(:components?)).to be false
@@ -527,7 +517,6 @@
it 'returns the components without touching the COMPONENTS_WRITE_LOCK' do
described_class.const_get(:COMPONENTS_WRITE_LOCK).lock
- expect(telemetry_client).to_not receive(:started!)
expect(test_class.send(:components)).to_not be_nil
end
end
diff --git a/spec/datadog/core/telemetry/client_spec.rb b/spec/datadog/core/telemetry/client_spec.rb
deleted file mode 100644
index 5618d4ea65e..00000000000
--- a/spec/datadog/core/telemetry/client_spec.rb
+++ /dev/null
@@ -1,329 +0,0 @@
-require 'spec_helper'
-
-require 'datadog/core/telemetry/client'
-
-RSpec.describe Datadog::Core::Telemetry::Client do
- subject(:client) do
- described_class.new(
- enabled: enabled,
- heartbeat_interval_seconds: heartbeat_interval_seconds,
- dependency_collection: dependency_collection
- )
- end
-
- let(:enabled) { true }
- let(:heartbeat_interval_seconds) { 1.3 }
- let(:dependency_collection) { true }
- let(:emitter) { double(Datadog::Core::Telemetry::Emitter) }
- let(:response) { double(Datadog::Core::Telemetry::Http::Adapters::Net::Response) }
- let(:not_found) { false }
-
- before do
- allow(Datadog::Core::Telemetry::Emitter).to receive(:new).and_return(emitter)
- allow(emitter).to receive(:request).and_return(response)
- allow(response).to receive(:not_found?).and_return(not_found)
- end
-
- describe '#initialize' do
- after do
- client.stop!
- end
-
- context 'with default parameters' do
- subject(:client) do
- described_class.new(
- heartbeat_interval_seconds: heartbeat_interval_seconds,
- dependency_collection: dependency_collection
- )
- end
-
- it { is_expected.to be_a_kind_of(described_class) }
- it { expect(client.enabled).to be(true) }
- end
-
- context 'when :enabled is false' do
- let(:enabled) { false }
- it { is_expected.to be_a_kind_of(described_class) }
- it { expect(client.enabled).to be(false) }
- end
-
- context 'when enabled' do
- let(:enabled) { true }
-
- it { is_expected.to be_a_kind_of(described_class) }
- it { expect(client.enabled).to be(true) }
- end
- end
-
- describe '#disable!' do
- after do
- client.stop!
- end
-
- it { expect { client.disable! }.to change { client.enabled }.from(true).to(false) }
- end
-
- describe '#started!' do
- subject(:started!) { client.started! }
-
- after do
- client.stop!
- end
-
- context 'when disabled' do
- let(:enabled) { false }
- it do
- started!
- expect(emitter).to_not have_received(:request)
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
-
- context 'when dependency_collection is true' do
- it do
- app_started = double
- allow(Datadog::Core::Telemetry::Event::AppStarted).to receive(:new).with(no_args).and_return(app_started)
-
- dependencies = double
- allow(Datadog::Core::Telemetry::Event::AppDependenciesLoaded)
- .to receive(:new).with(no_args).and_return(dependencies)
-
- started!
- expect(emitter).to have_received(:request).with(app_started)
- expect(emitter).to have_received(:request).with(dependencies)
- end
- end
-
- context 'when dependency_collection is false' do
- let(:dependency_collection) { false }
-
- it do
- app_started = double
- allow(Datadog::Core::Telemetry::Event::AppStarted).to receive(:new).with(no_args).and_return(app_started)
-
- dependencies = double
- allow(Datadog::Core::Telemetry::Event::AppDependenciesLoaded)
- .to receive(:new).with(no_args).and_return(dependencies)
-
- started!
- expect(emitter).to have_received(:request).with(app_started)
- expect(emitter).to_not have_received(:request).with(dependencies)
- end
-
- context 'with heartbeat' do
- let(:heartbeat_interval_seconds) { 0 }
-
- it 'sends a heartbeat strictly after app-started' do
- @sent_hearbeat = false
- allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
- # Ensure app-started was already sent by now
- expect(emitter).to have_received(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppStarted))
- @sent_hearbeat = true
- response
- end
-
- client.started!
-
- try_wait_until { @sent_hearbeat }
- end
- end
- end
- end
-
- context 'when internal error returned by emitter' do
- let(:response) { Datadog::Core::Telemetry::Http::InternalErrorResponse.new('error') }
-
- it { expect { started! }.to_not raise_error }
- end
-
- context 'when response returns 404' do
- let(:not_found) { true }
-
- before do
- logger = double(Datadog::Core::Logger)
- allow(logger).to receive(:debug).with(any_args)
- allow(Datadog).to receive(:logger).and_return(logger)
- end
-
- it do
- started!
- expect(client.enabled).to be(false)
- expect(client.unsupported).to be(true)
- expect(Datadog.logger).to have_received(:debug).with(
- 'Agent does not support telemetry; disabling future telemetry events.'
- )
- end
- end
-
- context 'when in fork' do
- before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
-
- it do
- client
- expect_in_fork do
- expect(emitter).to_not receive(:request)
- client.started!
- end
- end
- end
- end
-
- describe '#emit_closing!' do
- subject(:emit_closing!) { client.emit_closing! }
-
- after do
- client.stop!
- end
-
- context 'when disabled' do
- let(:enabled) { false }
- it do
- emit_closing!
- expect(emitter).to_not have_received(:request)
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
- it do
- double = double()
- allow(Datadog::Core::Telemetry::Event::AppClosing).to receive(:new).with(no_args).and_return(double)
-
- emit_closing!
- expect(emitter).to have_received(:request).with(double)
- end
-
- it { is_expected.to be(response) }
- end
-
- context 'when in fork' do
- before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
-
- it do
- client
- expect_in_fork do
- expect(emitter).to_not receive(:request)
- client.started!
- end
- end
- end
- end
-
- describe '#stop!' do
- subject(:stop!) { client.stop! }
- let(:worker) { instance_double(Datadog::Core::Telemetry::Heartbeat) }
-
- before do
- allow(Datadog::Core::Telemetry::Heartbeat).to receive(:new)
- .with(enabled: enabled, heartbeat_interval_seconds: heartbeat_interval_seconds).and_return(worker)
- allow(worker).to receive(:start)
- allow(worker).to receive(:stop)
- end
-
- context 'when disabled' do
- let(:enabled) { false }
- it 'does not raise error' do
- stop!
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
-
- context 'when stop! has been called already' do
- it 'does not raise error' do
- stop!
- stop!
- end
- end
- end
- end
-
- describe '#integrations_change!' do
- subject(:integrations_change!) { client.integrations_change! }
-
- after do
- client.stop!
- end
-
- context 'when disabled' do
- let(:enabled) { false }
- it do
- integrations_change!
- expect(emitter).to_not have_received(:request)
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
- it do
- double = double()
- allow(Datadog::Core::Telemetry::Event::AppIntegrationsChange).to receive(:new).with(no_args).and_return(double)
-
- integrations_change!
- expect(emitter).to have_received(:request).with(double)
- end
-
- it { is_expected.to be(response) }
- end
-
- context 'when in fork' do
- before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
-
- it do
- client
- expect_in_fork do
- expect(emitter).to_not receive(:request)
- client.started!
- end
- end
- end
- end
-
- describe '#client_configuration_change!' do
- subject(:client_configuration_change!) { client.client_configuration_change!(changes) }
- let(:changes) { double('changes') }
-
- after do
- client.stop!
- end
-
- context 'when disabled' do
- let(:enabled) { false }
- it do
- client_configuration_change!
- expect(emitter).to_not have_received(:request)
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
- it do
- double = double()
- allow(Datadog::Core::Telemetry::Event::AppClientConfigurationChange).to receive(:new).with(
- changes,
- 'remote_config'
- ).and_return(double)
-
- client_configuration_change!
- expect(emitter).to have_received(:request).with(double)
- end
-
- it { is_expected.to be(response) }
- end
-
- context 'when in fork' do
- before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
-
- it do
- client
- expect_in_fork do
- expect(emitter).to_not receive(:request)
- client.started!
- end
- end
- end
- end
-end
diff --git a/spec/datadog/core/telemetry/component_spec.rb b/spec/datadog/core/telemetry/component_spec.rb
new file mode 100644
index 00000000000..ba17d37c2f4
--- /dev/null
+++ b/spec/datadog/core/telemetry/component_spec.rb
@@ -0,0 +1,207 @@
+require 'spec_helper'
+
+require 'datadog/core/telemetry/component'
+
+RSpec.describe Datadog::Core::Telemetry::Component do
+ subject(:telemetry) do
+ described_class.new(
+ enabled: enabled,
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ dependency_collection: dependency_collection
+ )
+ end
+
+ let(:enabled) { true }
+ let(:heartbeat_interval_seconds) { 0 }
+ let(:dependency_collection) { true }
+ let(:worker) { double(Datadog::Core::Telemetry::Worker) }
+ let(:not_found) { false }
+
+ before do
+ allow(Datadog::Core::Telemetry::Worker).to receive(:new).with(
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ dependency_collection: dependency_collection,
+ enabled: enabled,
+ emitter: an_instance_of(Datadog::Core::Telemetry::Emitter)
+ ).and_return(worker)
+
+ allow(worker).to receive(:start)
+ allow(worker).to receive(:enqueue)
+ allow(worker).to receive(:stop)
+ allow(worker).to receive(:"enabled=")
+ end
+
+ describe '#initialize' do
+ after do
+ telemetry.stop!
+ end
+
+ context 'with default parameters' do
+ subject(:telemetry) do
+ described_class.new(
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ dependency_collection: dependency_collection
+ )
+ end
+
+ it { is_expected.to be_a_kind_of(described_class) }
+ it { expect(telemetry.enabled).to be(true) }
+ end
+
+ context 'when :enabled is false' do
+ let(:enabled) { false }
+ it { is_expected.to be_a_kind_of(described_class) }
+ it { expect(telemetry.enabled).to be(false) }
+ end
+
+ context 'when enabled' do
+ let(:enabled) { true }
+
+ it { is_expected.to be_a_kind_of(described_class) }
+ it { expect(telemetry.enabled).to be(true) }
+ end
+ end
+
+ describe '#disable!' do
+ after do
+ telemetry.stop!
+ end
+
+ it { expect { telemetry.disable! }.to change { telemetry.enabled }.from(true).to(false) }
+
+ it 'disables worker' do
+ telemetry.disable!
+
+ expect(worker).to have_received(:"enabled=").with(false)
+ end
+ end
+
+ describe '#emit_closing!' do
+ subject(:emit_closing!) { telemetry.emit_closing! }
+
+ after do
+ telemetry.stop!
+ end
+
+ context 'when disabled' do
+ let(:enabled) { false }
+ it do
+ emit_closing!
+
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+
+ context 'when enabled' do
+ let(:enabled) { true }
+ it do
+ emit_closing!
+
+ expect(worker).to have_received(:enqueue).with(
+ an_instance_of(Datadog::Core::Telemetry::Event::AppClosing)
+ )
+ end
+ end
+
+ context 'when in fork' do
+ before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
+
+ it do
+ telemetry
+ expect_in_fork do
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+ end
+ end
+
+ describe '#stop!' do
+ subject(:stop!) { telemetry.stop! }
+
+ it 'stops worker once' do
+ stop!
+ stop!
+
+ expect(worker).to have_received(:stop).once
+ end
+ end
+
+ describe '#integrations_change!' do
+ subject(:integrations_change!) { telemetry.integrations_change! }
+
+ after do
+ telemetry.stop!
+ end
+
+ context 'when disabled' do
+ let(:enabled) { false }
+ it do
+ integrations_change!
+
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+
+ context 'when enabled' do
+ let(:enabled) { true }
+ it do
+ integrations_change!
+
+ expect(worker).to have_received(:enqueue).with(
+ an_instance_of(Datadog::Core::Telemetry::Event::AppIntegrationsChange)
+ )
+ end
+ end
+
+ context 'when in fork' do
+ before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
+
+ it do
+ telemetry
+ expect_in_fork do
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+ end
+ end
+
+ describe '#client_configuration_change!' do
+ subject(:client_configuration_change!) { telemetry.client_configuration_change!(changes) }
+ let(:changes) { double('changes') }
+
+ after do
+ telemetry.stop!
+ end
+
+ context 'when disabled' do
+ let(:enabled) { false }
+ it do
+ client_configuration_change!
+
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+
+ context 'when enabled' do
+ let(:enabled) { true }
+ it do
+ client_configuration_change!
+
+ expect(worker).to have_received(:enqueue).with(
+ an_instance_of(Datadog::Core::Telemetry::Event::AppClientConfigurationChange)
+ )
+ end
+ end
+
+ context 'when in fork' do
+ before { skip 'Fork not supported on current platform' unless Process.respond_to?(:fork) }
+
+ it do
+ telemetry
+ expect_in_fork do
+ expect(worker).not_to have_received(:enqueue)
+ end
+ end
+ end
+ end
+end
diff --git a/spec/datadog/core/telemetry/event_spec.rb b/spec/datadog/core/telemetry/event_spec.rb
index 32d83e54fc4..f53bd9468ad 100644
--- a/spec/datadog/core/telemetry/event_spec.rb
+++ b/spec/datadog/core/telemetry/event_spec.rb
@@ -1,6 +1,7 @@
require 'spec_helper'
require 'datadog/core/telemetry/event'
+require 'datadog/core/telemetry/metric'
RSpec.describe Datadog::Core::Telemetry::Event do
let(:id) { double('seq_id') }
@@ -207,4 +208,48 @@ def contain_configuration(*array)
is_expected.to eq({})
end
end
+
+ context 'GenerateMetrics' do
+ let(:event) { described_class::GenerateMetrics.new(namespace, metrics) }
+
+ let(:namespace) { 'general' }
+ let(:metric_name) { 'request_count' }
+ let(:metric) do
+ Datadog::Core::Telemetry::Metric::Count.new(metric_name, tags: { status: '200' })
+ end
+ let(:metrics) { [metric] }
+
+ let(:expected_metric_series) { [metric.to_h] }
+
+ it do
+ is_expected.to eq(
+ {
+ namespace: namespace,
+ series: expected_metric_series
+ }
+ )
+ end
+ end
+
+ context 'Distributions' do
+ let(:event) { described_class::Distributions.new(namespace, metrics) }
+
+ let(:namespace) { 'general' }
+ let(:metric_name) { 'request_duration' }
+ let(:metric) do
+ Datadog::Core::Telemetry::Metric::Distribution.new(metric_name, tags: { status: '200' })
+ end
+ let(:metrics) { [metric] }
+
+ let(:expected_metric_series) { [metric.to_h] }
+
+ it do
+ is_expected.to eq(
+ {
+ namespace: namespace,
+ series: expected_metric_series
+ }
+ )
+ end
+ end
end
diff --git a/spec/datadog/core/telemetry/heartbeat_spec.rb b/spec/datadog/core/telemetry/heartbeat_spec.rb
deleted file mode 100644
index 645120267e0..00000000000
--- a/spec/datadog/core/telemetry/heartbeat_spec.rb
+++ /dev/null
@@ -1,46 +0,0 @@
-require 'spec_helper'
-
-require 'datadog/core/telemetry/heartbeat'
-
-RSpec.describe Datadog::Core::Telemetry::Heartbeat do
- subject(:heartbeat) do
- described_class.new(enabled: enabled, heartbeat_interval_seconds: heartbeat_interval_seconds, &block)
- end
-
- let(:enabled) { true }
- let(:heartbeat_interval_seconds) { 1.2 }
- let(:block) { proc {} }
-
- after do
- heartbeat.stop(true, 0)
- heartbeat.join
- end
-
- describe '.new' do
- context 'when using default settings' do
- subject(:heartbeat) { described_class.new(heartbeat_interval_seconds: heartbeat_interval_seconds, &block) }
- it do
- is_expected.to have_attributes(
- enabled?: true,
- loop_base_interval: 1.2, # seconds
- task: block
- )
- end
- end
-
- context 'when enabled' do
- let(:enabled) { true }
-
- it do
- heartbeat
-
- try_wait_until { heartbeat.running? }
- expect(heartbeat).to have_attributes(
- run_async?: true,
- running?: true,
- started?: true
- )
- end
- end
- end
-end
diff --git a/spec/datadog/core/telemetry/metric_spec.rb b/spec/datadog/core/telemetry/metric_spec.rb
new file mode 100644
index 00000000000..8b77475a8f9
--- /dev/null
+++ b/spec/datadog/core/telemetry/metric_spec.rb
@@ -0,0 +1,277 @@
+require 'spec_helper'
+
+require 'datadog/core/telemetry/metric'
+
+RSpec.describe Datadog::Core::Telemetry::Metric do
+ let(:now) { 123123 }
+ before { allow(Time).to receive(:now).and_return(now, now + 1, now + 2, now + 3) }
+
+ describe '.metric_id' do
+ subject(:metric_id) { described_class.metric_id(type, name, tags) }
+
+ let(:type) { 'type' }
+ let(:name) { 'name' }
+ let(:tags) { ['tag1:val1', 'tag2:val2'] }
+
+ it { is_expected.to eq('type::name::tag1:val1,tag2:val2') }
+ end
+
+ describe Datadog::Core::Telemetry::Metric::Count do
+ subject(:metric) { described_class.new(name, tags: tags) }
+
+ let(:name) { 'metric_name' }
+ let(:tags) { { tag1: 'val1', tag2: 'val2' } }
+
+ it do
+ is_expected.to have_attributes(
+ name: name,
+ tags: ['tag1:val1', 'tag2:val2'],
+ interval: nil,
+ common: true,
+ values: []
+ )
+ end
+
+ describe '#type' do
+ subject(:type) { metric.type }
+
+ it { is_expected.to eq('count') }
+ end
+
+ describe '#inc' do
+ subject(:inc) { metric.inc(value) }
+
+ let(:value) { 5 }
+
+ it 'tracks the value' do
+ expect { inc }.to change { metric.values }.from([]).to([[now, value]])
+ end
+
+ context 'incrementing again' do
+ it 'adds the value to the previous one and updates timestamp' do
+ metric.inc(value)
+ expect { inc }.to change { metric.values }.from([[now, value]]).to([[now + 1, value + value]])
+ end
+ end
+ end
+
+ describe '#dec' do
+ subject(:dec) { metric.dec(value) }
+
+ let(:value) { 5 }
+
+ it 'tracks the value' do
+ expect { dec }.to change { metric.values }.from([]).to([[now, -value]])
+ end
+ end
+
+ describe '#to_h' do
+ subject(:to_h) { metric.to_h }
+ let(:value) { 2 }
+
+ before do
+ metric.inc(value)
+ end
+
+ it do
+ is_expected.to eq(
+ metric: name,
+ points: [[now, 2]],
+ type: 'count',
+ tags: ['tag1:val1', 'tag2:val2'],
+ common: true
+ )
+ end
+ end
+ end
+
+ describe Datadog::Core::Telemetry::Metric::Gauge do
+ subject(:metric) { described_class.new(name, tags: tags, interval: interval) }
+
+ let(:name) { 'metric_name' }
+ let(:tags) { { tag1: 'val1', tag2: 'val2' } }
+ let(:interval) { 10 }
+
+ it do
+ is_expected.to have_attributes(
+ name: name,
+ tags: ['tag1:val1', 'tag2:val2'],
+ interval: interval,
+ common: true,
+ values: []
+ )
+ end
+
+ describe '#type' do
+ subject(:type) { metric.type }
+
+ it { is_expected.to eq('gauge') }
+ end
+
+ describe '#track' do
+ subject(:track) { metric.track(value) }
+
+ let(:value) { 5 }
+
+ it 'tracks the value' do
+ expect { track }.to change { metric.values }.from([]).to([[now, value]])
+ end
+
+ context 'tracking again' do
+ it 'updates the value and timestamp' do
+ metric.track(value + 1)
+ expect { track }.to change { metric.values }.from([[now, value + 1]]).to([[now + 1, value]])
+ end
+ end
+ end
+
+ describe '#to_h' do
+ subject(:to_h) { metric.to_h }
+ let(:value) { 2 }
+
+ before do
+ metric.track(value)
+ end
+
+ it do
+ is_expected.to eq(
+ metric: name,
+ points: [[now, 2]],
+ type: 'gauge',
+ tags: ['tag1:val1', 'tag2:val2'],
+ common: true,
+ interval: interval
+ )
+ end
+ end
+ end
+
+ describe Datadog::Core::Telemetry::Metric::Rate do
+ subject(:metric) { described_class.new(name, tags: tags, interval: interval) }
+
+ let(:name) { 'metric_name' }
+ let(:tags) { { tag1: 'val1', tag2: 'val2' } }
+ let(:interval) { 10 }
+
+ it do
+ is_expected.to have_attributes(
+ name: name,
+ tags: ['tag1:val1', 'tag2:val2'],
+ interval: interval,
+ common: true,
+ values: []
+ )
+ end
+
+ describe '#type' do
+ subject(:type) { metric.type }
+
+ it { is_expected.to eq('rate') }
+ end
+
+ describe '#track' do
+ subject(:track) { metric.track(value) }
+
+ let(:value) { 5 }
+
+ it 'tracks the rate value' do
+ expect { track }.to change { metric.values }.from([]).to([[now, value.to_f / interval]])
+ end
+
+ context 'tracking again' do
+ it 'updates the value and timestamp' do
+ metric.track(value)
+ expect { track }.to change { metric.values }
+ .from([[now, value.to_f / interval]])
+ .to([[now + 1, (value + value).to_f / interval]])
+ end
+ end
+
+ context 'interval is nil' do
+ let(:interval) { nil }
+
+ it 'sets rate to zero' do
+ expect { track }.to change { metric.values }.from([]).to([[now, 0.0]])
+ end
+ end
+ end
+
+ describe '#to_h' do
+ subject(:to_h) { metric.to_h }
+ let(:value) { 2 }
+
+ before do
+ metric.track(value)
+ end
+
+ it do
+ is_expected.to eq(
+ metric: name,
+ points: [[now, 0.2]],
+ type: 'rate',
+ tags: ['tag1:val1', 'tag2:val2'],
+ common: true,
+ interval: 10
+ )
+ end
+ end
+ end
+
+ describe Datadog::Core::Telemetry::Metric::Distribution do
+ subject(:metric) { described_class.new(name, tags: tags) }
+
+ let(:name) { 'metric_name' }
+ let(:tags) { { tag1: 'val1', tag2: 'val2' } }
+
+ it do
+ is_expected.to have_attributes(
+ name: name,
+ tags: ['tag1:val1', 'tag2:val2'],
+ interval: nil,
+ common: true,
+ values: []
+ )
+ end
+
+ describe '#type' do
+ subject(:type) { metric.type }
+
+ it { is_expected.to eq('distributions') }
+ end
+
+ describe '#track' do
+ subject(:track) { metric.track(value) }
+
+ let(:value) { 5 }
+
+ it 'tracks the value' do
+ expect { track }.to change { metric.values }.from([]).to([value])
+ end
+
+ context 'tracking again' do
+ it 'adds the value to the previous ones' do
+ metric.track(value)
+ expect { track }.to change { metric.values }.from([value]).to([value, value])
+ end
+ end
+ end
+
+ describe '#to_h' do
+ subject(:to_h) { metric.to_h }
+ let(:value) { 2 }
+
+ before do
+ metric.track(value)
+ end
+
+ it do
+ is_expected.to eq(
+ metric: name,
+ points: [2],
+ tags: ['tag1:val1', 'tag2:val2'],
+ common: true
+ )
+ end
+ end
+ end
+end
diff --git a/spec/datadog/core/telemetry/worker_spec.rb b/spec/datadog/core/telemetry/worker_spec.rb
new file mode 100644
index 00000000000..e73fec0888e
--- /dev/null
+++ b/spec/datadog/core/telemetry/worker_spec.rb
@@ -0,0 +1,303 @@
+require 'spec_helper'
+
+require 'datadog/core/telemetry/worker'
+
+RSpec.describe Datadog::Core::Telemetry::Worker do
+ subject(:worker) do
+ described_class.new(
+ enabled: enabled,
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ emitter: emitter,
+ dependency_collection: dependency_collection
+ )
+ end
+
+ let(:enabled) { true }
+ let(:heartbeat_interval_seconds) { 0.5 }
+ let(:emitter) { double(Datadog::Core::Telemetry::Emitter) }
+ let(:dependency_collection) { false }
+
+ let(:backend_supports_telemetry?) { true }
+ let(:response) do
+ double(
+ Datadog::Core::Telemetry::Http::Adapters::Net::Response,
+ not_found?: !backend_supports_telemetry?,
+ ok?: backend_supports_telemetry?
+ )
+ end
+
+ before do
+ logger = double(Datadog::Core::Logger)
+ allow(logger).to receive(:debug).with(any_args)
+ allow(Datadog).to receive(:logger).and_return(logger)
+
+ @received_started = false
+ @received_heartbeat = false
+
+ allow(emitter).to receive(:request).with(an_instance_of(Datadog::Core::Telemetry::Event::AppStarted)) do
+ @received_started = true
+
+ response
+ end
+
+ allow(emitter).to receive(:request).with(an_instance_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
+ @received_heartbeat = true
+
+ response
+ end
+ end
+
+ after do
+ worker.stop(true)
+ worker.join
+
+ Datadog::Core::Telemetry::Worker::TELEMETRY_STARTED_ONCE.send(:reset_ran_once_state_for_tests)
+ end
+
+ describe '.new' do
+ it 'creates a new worker in stopped state' do
+ expect(worker).to have_attributes(
+ enabled?: true,
+ loop_base_interval: heartbeat_interval_seconds,
+ run_async?: false,
+ running?: false,
+ started?: false
+ )
+ end
+ end
+
+ describe '#start' do
+ context 'when enabled' do
+ context "when backend doesn't support telemetry" do
+ let(:backend_supports_telemetry?) { false }
+
+ it 'disables the worker' do
+ worker.start
+
+ try_wait_until { @received_started }
+
+ expect(worker).to have_attributes(
+ enabled?: false,
+ loop_base_interval: heartbeat_interval_seconds,
+ )
+ expect(Datadog.logger).to have_received(:debug).with(
+ 'Agent does not support telemetry; disabling future telemetry events.'
+ )
+ expect(@received_heartbeat).to be(false)
+ end
+ end
+
+ context 'when backend supports telemetry' do
+ let(:backend_supports_telemetry?) { true }
+
+ it 'starts the worker and sends heartbeat event' do
+ worker.start
+
+ try_wait_until { @received_heartbeat }
+
+ expect(worker).to have_attributes(
+ enabled?: true,
+ loop_base_interval: heartbeat_interval_seconds,
+ run_async?: true,
+ running?: true,
+ started?: true
+ )
+ end
+
+ it 'always sends heartbeat event after started event' do
+ sent_heartbeat = false
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
+ # app-started was already sent by now
+ expect(worker.sent_started_event?).to be(true)
+
+ sent_heartbeat = true
+
+ response
+ end
+
+ worker.start
+
+ try_wait_until { sent_heartbeat }
+ end
+
+ context 'when app-started event fails' do
+ it 'retries' do
+ expect(emitter).to receive(:request).with(an_instance_of(Datadog::Core::Telemetry::Event::AppStarted))
+ .and_return(
+ double(
+ Datadog::Core::Telemetry::Http::Adapters::Net::Response,
+ not_found?: false,
+ ok?: false
+ )
+ ).once
+
+ expect(emitter).to receive(:request).with(an_instance_of(Datadog::Core::Telemetry::Event::AppStarted)) do
+ @received_started = true
+
+ response
+ end
+
+ sent_heartbeat = false
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
+ # app-started was already sent by now
+ expect(@received_started).to be(true)
+
+ sent_heartbeat = true
+
+ response
+ end
+
+ worker.start
+
+ try_wait_until { sent_heartbeat }
+ end
+ end
+
+ context 'when app-started event exhausted retries' do
+ let(:heartbeat_interval_seconds) { 0.1 }
+
+ it 'stops retrying, never sends heartbeat, and disables worker' do
+ expect(emitter).to receive(:request).with(an_instance_of(Datadog::Core::Telemetry::Event::AppStarted))
+ .and_return(
+ double(
+ Datadog::Core::Telemetry::Http::Adapters::Net::Response,
+ not_found?: false,
+ ok?: false
+ )
+ ).exactly(described_class::APP_STARTED_EVENT_RETRIES).times
+
+ sent_heartbeat = false
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
+ # app-started was already sent by now
+ expect(@received_started).to be(true)
+
+ sent_heartbeat = true
+
+ response
+ end
+
+ worker.start
+
+ try_wait_until { !worker.enabled? }
+
+ expect(sent_heartbeat).to be(false)
+ expect(worker.failed_to_start?).to be(true)
+ end
+ end
+
+ context 'when dependencies collection enabled' do
+ let(:dependency_collection) { true }
+
+ it 'sends dependencies loaded event after started event' do
+ sent_dependencies = false
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppDependenciesLoaded)) do
+ # app-started was already sent by now
+ # don't use worker.sent_started_event? because it uses the same lock
+ expect(@received_started).to be(true)
+
+ sent_dependencies = true
+
+ response
+ end
+
+ worker.start
+
+ try_wait_until { sent_dependencies }
+ end
+ end
+ end
+
+ context 'when internal error returned by emitter' do
+ let(:response) { Datadog::Core::Telemetry::Http::InternalErrorResponse.new('error') }
+
+ it 'does not send heartbeat event' do
+ worker.start
+
+ try_wait_until { @received_started }
+
+ expect(@received_heartbeat).to be(false)
+ end
+ end
+
+ context 'several workers running' do
+ it 'sends single started event' do
+ started_events = 0
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppStarted)) do
+ started_events += 1
+
+ response
+ end
+
+ heartbeat_events = 0
+ allow(emitter).to receive(:request).with(kind_of(Datadog::Core::Telemetry::Event::AppHeartbeat)) do
+ heartbeat_events += 1
+
+ response
+ end
+
+ workers = Array.new(3) do
+ described_class.new(
+ enabled: enabled,
+ heartbeat_interval_seconds: heartbeat_interval_seconds,
+ emitter: emitter,
+ dependency_collection: dependency_collection
+ )
+ end
+ workers.each(&:start)
+
+ try_wait_until { heartbeat_events >= 3 }
+
+ expect(started_events).to be(1)
+
+ workers.each do |w|
+ w.stop(true, 0)
+ w.join
+ end
+ end
+ end
+ end
+
+ context 'when disabled' do
+ let(:enabled) { false }
+
+ it 'does not start the worker' do
+ expect(worker).not_to receive(:perform)
+
+ worker.start
+ end
+ end
+ end
+
+ describe '#stop' do
+ let(:heartbeat_interval_seconds) { 3 }
+
+ it 'flushes events and stops the worker' do
+ worker.start
+
+ expect(worker).to receive(:flush_events).at_least(:once)
+ worker.stop(true)
+ end
+ end
+
+ describe '#enqueue' do
+ it 'adds events to the buffer and flushes them later' do
+ events_received = 0
+ allow(emitter).to receive(:request).with(
+ an_instance_of(Datadog::Core::Telemetry::Event::AppIntegrationsChange)
+ ) do
+ events_received += 1
+
+ response
+ end
+
+ worker.start
+
+ events_sent = 3
+ events_sent.times do
+ worker.enqueue(Datadog::Core::Telemetry::Event::AppIntegrationsChange.new)
+ end
+
+ try_wait_until { events_received == events_sent }
+ end
+ end
+end
diff --git a/spec/datadog/core/utils/only_once_successful_spec.rb b/spec/datadog/core/utils/only_once_successful_spec.rb
new file mode 100644
index 00000000000..e0adb31b41a
--- /dev/null
+++ b/spec/datadog/core/utils/only_once_successful_spec.rb
@@ -0,0 +1,233 @@
+require 'datadog/core/utils/only_once_successful'
+
+RSpec.describe Datadog::Core::Utils::OnlyOnceSuccessful do
+ subject(:only_once_successful) { described_class.new(limit) }
+
+ let(:limit) { 0 }
+
+ describe '#run' do
+ context 'when limitless' do
+ context 'before running once' do
+ it do
+ expect { |block| only_once_successful.run(&block) }.to yield_control
+ end
+
+ it 'returns the result of the block ran' do
+ expect(only_once_successful.run { :result }).to be :result
+ end
+ end
+
+ context 'after running once' do
+ let(:result) { nil }
+
+ before do
+ only_once_successful.run { result }
+ end
+
+ context 'when block returns truthy value' do
+ let(:result) { true }
+
+ it do
+ expect { |block| only_once_successful.run(&block) }.to_not yield_control
+ end
+
+ it do
+ expect(only_once_successful.run { :result }).to be nil
+ end
+ end
+
+ context 'when block returns falsey value' do
+ let(:result) { false }
+
+ it do
+ expect { |block| only_once_successful.run(&block) }.to yield_control
+ end
+
+ it 'runs again until block returns truthy value' do
+ expect(only_once_successful.run { :result }).to be :result
+
+ expect(only_once_successful.run { :result }).to be nil
+ end
+ end
+ end
+ end
+
+ context 'when limited' do
+ let(:limit) { 2 }
+
+ context 'when block returns truthy value' do
+ before { only_once_successful.run { true } }
+
+ it do
+ expect { |block| only_once_successful.run(&block) }.to_not yield_control
+ end
+
+ it do
+ expect(only_once_successful.run { :result }).to be nil
+ end
+ end
+
+ context 'when block returns falsey value "limit" times' do
+ before do
+ limit.times do
+ only_once_successful.run { false }
+ end
+ end
+
+ it do
+ expect { |block| only_once_successful.run(&block) }.to_not yield_control
+ end
+
+ it do
+ expect(only_once_successful.run { :result }).to be nil
+ end
+ end
+ end
+
+ context 'when run throws an exception' do
+ it 'propagates the exception out' do
+ exception = RuntimeError.new('boom')
+
+ expect { only_once_successful.run { raise exception } }.to raise_exception(exception)
+ end
+
+ it 'runs again' do
+ only_once_successful.run { raise 'boom' } rescue nil
+
+ expect { |block| only_once_successful.run(&block) }.to yield_control
+ end
+ end
+ end
+
+ describe '#ran?' do
+ context 'before running once' do
+ it do
+ expect(only_once_successful.ran?).to be false
+ end
+ end
+
+ context 'after running once' do
+ let(:result) { nil }
+
+ before do
+ only_once_successful.run { result }
+ end
+
+ context 'when block returns truthy value' do
+ let(:result) { true }
+
+ it do
+ expect(only_once_successful.ran?).to be true
+ end
+ end
+
+ context 'when block returns falsey value' do
+ it do
+ expect(only_once_successful.ran?).to be false
+ end
+ end
+ end
+
+ context 'when limited and ran "limit" times' do
+ let(:limit) { 2 }
+
+ before do
+ limit.times do
+ only_once_successful.run { false }
+ end
+ end
+
+ it do
+ expect(only_once_successful.ran?).to be true
+ end
+ end
+ end
+
+ describe '#success?' do
+ context 'before running once' do
+ it do
+ expect(only_once_successful.success?).to be false
+ end
+ end
+
+ context 'after running once' do
+ let(:result) { nil }
+
+ before do
+ only_once_successful.run { result }
+ end
+
+ context 'when block returns truthy value' do
+ let(:result) { true }
+
+ it do
+ expect(only_once_successful.success?).to be true
+ end
+ end
+
+ context 'when block returns falsey value' do
+ it do
+ expect(only_once_successful.success?).to be false
+ end
+ end
+ end
+
+ context 'when limited and ran "limit" times' do
+ let(:limit) { 2 }
+
+ before do
+ limit.times do
+ only_once_successful.run { false }
+ end
+ end
+
+ it do
+ expect(only_once_successful.success?).to be false
+ end
+ end
+ end
+
+ describe '#failed?' do
+ context 'before running once' do
+ it do
+ expect(only_once_successful.failed?).to be false
+ end
+ end
+
+ context 'after running once' do
+ let(:result) { nil }
+
+ before do
+ only_once_successful.run { result }
+ end
+
+ context 'when block returns truthy value' do
+ let(:result) { true }
+
+ it do
+ expect(only_once_successful.failed?).to be false
+ end
+ end
+
+ context 'when block returns falsey value' do
+ it do
+ expect(only_once_successful.failed?).to be false
+ end
+ end
+ end
+
+ context 'when limited and ran "limit" times' do
+ let(:limit) { 2 }
+
+ before do
+ limit.times do
+ only_once_successful.run { false }
+ end
+ end
+
+ it do
+ expect(only_once_successful.failed?).to be true
+ end
+ end
+ end
+end
diff --git a/spec/datadog/tracing/contrib/extensions_spec.rb b/spec/datadog/tracing/contrib/extensions_spec.rb
index d53dd66c0e8..461fd09852f 100644
--- a/spec/datadog/tracing/contrib/extensions_spec.rb
+++ b/spec/datadog/tracing/contrib/extensions_spec.rb
@@ -46,7 +46,7 @@
end
it 'sends a telemetry integrations change event' do
- expect_any_instance_of(Datadog::Core::Telemetry::Client).to receive(:integrations_change!)
+ expect_any_instance_of(Datadog::Core::Telemetry::Component).to receive(:integrations_change!)
configure
end
end