diff --git a/.circleci/datadog/ci-local-config.yaml b/.circleci/datadog/ci-local-config.yaml
deleted file mode 100644
index 566a53d38dc..00000000000
--- a/.circleci/datadog/ci-local-config.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-# Config file taken from https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml
-# These values are overridden by environment variables:
-#   api_key
-#   dd_site
-#   dd_url
-#   DD_TAGS
-#   DD_EXTRA_TAGS
-#   DD_ENV
-# apm is explicitly disabled here for cost reasons
-
-
-##################################
-## Log collection Configuration ##
-##################################
-
-## @param logs_enabled - boolean - optional - default: false
-## @env DD_LOGS_ENABLED - boolean - optional - default: false
-## Enable Datadog Agent log collection by setting logs_enabled to true.
-#
-logs_enabled: true
-
-## @param logs_config - custom object - optional
-## Enter specific configurations for your Log collection.
-## Uncomment this parameter and the one below to enable them.
-## See https://docs.datadoghq.com/agent/logs/
-#
-logs_config:
-
-  ## @param container_collect_all - boolean - optional - default: false
-  ## @env DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL - boolean - optional - default: false
-  ## Enable container log collection for all the containers (see ac_exclude to filter out containers)
-  #
-  container_collect_all: true
-
-
-####################################
-## Trace Collection Configuration ##
-####################################
-
-## @param apm_config - custom object - optional
-## Enter specific configurations for your trace collection.
-## Uncomment this parameter and the one below to enable them.
-## See https://docs.datadoghq.com/agent/apm/
-#
-apm_config:
-
-  ## @param enabled - boolean - optional - default: true
-  ## @env DD_APM_ENABLED - boolean - optional - default: true
-  ## Set to true to enable the APM Agent.
-  #
-  enabled: false
-
-######################################
-## Process Collection Configuration ##
-######################################
-
-# @param process_config - custom object - optional
-# Enter specific configurations for your Process data collection.
-# Uncomment this parameter and the one below to enable them.
-# See https://docs.datadoghq.com/graphing/infrastructure/process/
-
-process_config:
-
-  # @param process_collection - custom object - optional
-  # Specifies settings for collecting processes.
-  process_collection:
-    # @param enabled - boolean - optional - default: false
-    # Enables collection of information about running processes.
-    enabled: false
-
-  # @param container_collection - custom object - optional
-  # Specifies settings for collecting containers.
-  container_collection:
-    # @param enabled - boolean - optional - default: true
-    # Enables collection of information about running containers.
-    enabled: false
-
-  # Deprecated - use `process_collection.enabled` and `container_collection.enabled` instead
-  # @param enabled - string - optional - default: "false"
-  # @env DD_PROCESS_CONFIG_ENABLED - string - optional - default: "false"
-  # A string indicating the enabled state of the Process Agent:
-  #   * "false"    : The Agent collects only containers information.
-  #   * "true"     : The Agent collects containers and processes information.
-  #   * "disabled" : The Agent process collection is disabled.
-
-  enabled: "false"
-
-  # @param process_discovery - custom object - optional
-  # Specifies custom settings for the `process_discovery` object.
-  process_discovery:
-    # @param enabled - boolean - optional - default: true
-    # Toggles the `process_discovery` check. If enabled, this check gathers information about running integrations.
-    enabled: false
-
-    # @param interval - duration - optional - default: 4h - minimum: 10m
-    # An interval in hours that specifies how often the process discovery check should run.
-    interval: 10m
-
-
-###########################
-## Logging Configuration ##
-###########################
-
-## @param log_level - string - optional - default: info
-## @env DD_LOG_LEVEL - string - optional - default: info
-## Minimum log level of the Datadog Agent.
-## Valid log levels are: trace, debug, info, warn, error, critical, and off.
-## Note: When using the 'off' log level, quotes are mandatory.
-#
-log_level: 'debug'
-
-## @param log_file - string - optional
-## @env DD_LOG_FILE - string - optional
-## Path of the log file for the Datadog Agent.
-## See https://docs.datadoghq.com/agent/guide/agent-log-files/
-#
-log_file: /tmp/artifacts/logs/dd-agent-log.txt
-
diff --git a/.circleci/datadog/e2e-log-settings.yaml b/.circleci/datadog/e2e-log-settings.yaml
deleted file mode 100644
index e13ed13c3f4..00000000000
--- a/.circleci/datadog/e2e-log-settings.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-logs:
-  - type: file
-    path: "/tmp/artifacts/logs/*.log"
-    service: ""
-    source: "determined-task-logs"
-  - type: file
-    path: "/tmp/devcluster/*.log"
-    service: ""
-    source: "devcluster-logs"
-  - type: file
-    path: "/tmp/priority_scheduler/*.log"
-    service: ""
-    source: "devcluster-priority-scheduler-logs"
diff --git a/.circleci/real_config.yml b/.circleci/real_config.yml
index 8a46115d37a..9d47772ff58 100644
--- a/.circleci/real_config.yml
+++ b/.circleci/real_config.yml
@@ -321,25 +321,6 @@ commands:
     steps:
       - run: pip install codecov
 
-  upload-junit-datadog:
-    parameters:
-      service:
-        type: string
-        default: ""
-      env:
-        type: string
-        default: "ci-cpu"
-      path:
-        type: string
-        default: "/tmp/test-results"
-    steps:
-      - run:
-          name: Upload Tests to DataDog
-          when: always
-          command: |
-            curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "./datadog-ci" && chmod +x ./datadog-ci
-            ./datadog-ci junit upload --service "determined-ai/<< parameters.service >>" --env "<< parameters.env >>" << parameters.path >>
-
   setup-go-intg-deps:
     steps:
       - install-protoc # Install newer version of protoc into $HOME/.local/bin, since default is proto2.
@@ -610,50 +591,6 @@ commands:
         type: boolean
         default: true
     steps:
-      - when:
-          condition:
-            and:
-              - equal: [<<parameters.master-host>>,'localhost']
-              - <>
-              - not: <>
-          steps:
-            - run:
-                name: Install DataDog agent
-                command: |
-                  if [ "$AIS_DD_ENABLE_MONITORING" == "true" ]; then
-                    host_tags="test.mark:<<parameters.mark>>,\
-                    ci.pipeline_id:${CIRCLE_PIPELINE_ID},\
-                    ci.workflow_id:${CIRCLE_WORKFLOW_ID},\
-                    ci.job_num:${CIRCLE_BUILD_NUM},\
-                    ci.username:${CIRCLE_USERNAME},\
-                    git.tag:${CIRCLE_TAG},\
-                    git.commit:${CIRCLE_SHA1},\
-                    git.repo:${CIRCLE_PROJECT_REPONAME},\
-                    ci.totalNodes:${CIRCLE_NODE_TOTAL},\
-                    ci.nodeIdx:${CIRCLE_NODE_INDEX},\
-                    git.pr_num:${CIRCLE_PR_NUMBER}"
-
-                    sudo mkdir -p /tmp/artifacts/logs
-                    sudo chmod -R a+rw /tmp/artifacts/logs
-
-                    DD_ENV="ci-${CIRCLE_JOB}" \
-                    DD_HOST_TAGS="$host_tags" \
-                    DD_SERVICE="determined-pytest-<<parameters.mark>>" \
-                    bash -c "$(curl -L https://s3.amazonaws.com/dd-agent/scripts/install_script_agent7.sh)"
-
-                    # config files for the agent have an expected file structure
-                    sudo mkdir -p /etc/datadog-agent/conf.d/determined-master.d/
-                    sudo chmod a+rw /etc/datadog-agent/datadog.yaml
-                    sudo chmod -R a+rw /etc/datadog-agent/conf.d/determined-master.d/
-                    sudo cat .circleci/datadog/ci-local-config.yaml >> /etc/datadog-agent/datadog.yaml
-                    sudo sed -e "s//determined-pytest-<<parameters.mark>>/g" .circleci/datadog/e2e-log-settings.yaml > /etc/datadog-agent/conf.d/determined-master.d/conf.yaml
-                    # restart agent with config
-                    sudo usermod -a -G docker dd-agent
-                    sudo systemctl stop datadog-agent
-                    sudo systemctl start datadog-agent
-                    sleep 5
-                    sudo datadog-agent status
-                  fi
       # Wait for master before splitting tests, since so many splits depend on
       # asking master for its configuration in order to apply skipifs.
       - when:
@@ -690,28 +627,12 @@ commands:
               echo "No Determined master listening on '<<parameters.master-scheme>>://<<parameters.master-host>>:<<parameters.master-port>>'"
             fi
 
-            tags="test.mark:<<parameters.mark>>,\
-            ci.pipeline_id:${CIRCLE_PIPELINE_ID},\
-            ci.workflow_id:${CIRCLE_WORKFLOW_ID},\
-            ci.job_num:${CIRCLE_BUILD_NUM},\
-            ci.username:${CIRCLE_USERNAME},\
-            git.tag:${CIRCLE_TAG},\
-            git.commit:${CIRCLE_SHA1},\
-            ci.totalNodes:${CIRCLE_NODE_TOTAL},\
-            ci.nodeIdx:${CIRCLE_NODE_INDEX},\
-            git.pr_num:${CIRCLE_PR_NUMBER}"
-
-            CMD="DD_CIVISIBILITY_AGENTLESS_ENABLED=true \
-            DD_TAGS='${tags}' \
-            DD_ENV='ci-<>' \
-            DD_SERVICE='determined-pytest-<<parameters.mark>>' \
-            DET_MASTER_CERT_FILE=<> \
+            CMD="DET_MASTER_CERT_FILE=<> \
             DET_MASTER_CERT_NAME=<> \
             IS_CIRCLECI_JOB=1 XDG_CONFIG_HOME=/tmp \
             xargs pytest --capture=tee-sys -vv \
             -m '<<parameters.mark>>' \
             --durations=0 \
-            --ddtrace \
             --master-scheme='<<parameters.master-scheme>>' \
             --master-host='<<parameters.master-host>>' \
             --master-port='<<parameters.master-port>>' \
@@ -736,20 +657,6 @@ commands:
           master_address: "<<parameters.master-scheme>>://<<parameters.master-host>>:<<parameters.master-port>>"
       - store_artifacts:
           path: /tmp/artifacts/logs
-      - when:
-          condition:
-            and:
-              - equal: [<<parameters.master-host>>,'localhost']
-              - <>
-              - not: <>
-          steps:
-            - run: # We don't know how long Circle leaves these machines running in the background. Take down the agent for safety.
-                name: Stop DataDog agent
-                when: always
-                command: |
-                  if [ "$AIS_DD_ENABLE_MONITORING" == "true" ]; then
-                    sudo systemctl stop datadog-agent || true
-                  fi
 
 
   run-det-deploy-tests:
@@ -2161,15 +2068,11 @@ jobs:
       - wait-for-master:
           host: "localhost"
           port: "8082"
-      - run: npm install --save-dev dd-trace # DataDog integration
       - run:
          environment:
            PW_EE: << parameters.ee >>
-            NODE_OPTIONS: "-r dd-trace/ci/init"
          command: |
            if [[ "$PW_EE" -eq 1 ]]; then env="ee"; else env="oss"; fi
-            DD_ENV="ci-devcluster-$env" \
-            DD_SERVICE=determined-ui-e2e \
            PW_PASSWORD=${INITIAL_USER_PASSWORD} \
            npm run e2e --prefix webui/react -- << parameters.playwright-options >>
       - store_artifacts:
@@ -2197,9 +2100,6 @@ jobs:
       - codecov/upload:
           flags: "web"
           xtra_args: "-v"
-      - upload-junit-datadog:
-          service:
-          env: "ci-cpu"
       - store_test_results:
           path: webui/react/junit.xml
       - store_artifacts:
@@ -2313,10 +2213,6 @@ jobs:
       - codecov/upload:
           flags: "backend"
           xtra_args: "-v -X fixes"
-      - upload-junit-datadog:
-          service: master/test-intg
-          env: ci-cpu
-          path: master/test-intg.junit.xml
       - store_test_results:
           path: master/test-intg.junit.xml
       - persist_to_workspace:
@@ -2345,10 +2241,6 @@ jobs:
       - codecov/upload:
           flags: "backend"
           xtra_args: "-v -X fixes"
-      - upload-junit-datadog:
-          service: agent/test-intg
-          env: ci-cpu
-          path: agent/test-intg.junit.xml
       - store_test_results:
           path: agent/test-intg.junit.xml
       - persist_to_workspace:
@@ -2467,9 +2359,6 @@ jobs:
       - run: COVERAGE_FILE=$PWD/test-unit-harness-cpu-pycov make -C harness test-cpu
       - run: coverage xml -i --data-file=./test-unit-harness-cpu-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-cpu
-          env: ci-cpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2494,9 +2383,6 @@ jobs:
       - run: COVERAGE_FILE=/root/project/test-unit-harness-gpu-tf-pycov make -C harness test-gpu-tf
       - run: coverage xml -i --data-file=./test-unit-harness-gpu-tf-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-gpu
-          env: ci-gpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2521,9 +2407,6 @@ jobs:
       - run: COVERAGE_FILE=/root/project/test-unit-harness-pytorch2-gpu-pycov make -C harness test-pytorch-gpu
       - run: coverage xml -i --data-file=./test-unit-harness-pytorch2-gpu-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-pytorch2-gpu
-          env: ci-gpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2547,9 +2430,6 @@ jobs:
       - run: COVERAGE_FILE=/root/project/test-unit-harness-pytorch2-cpu-pycov make -C harness test-pytorch-cpu
       - run: coverage xml -i --data-file=./test-unit-harness-pytorch2-cpu-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-pytorch2-cpu
-          env: ci-cpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2574,9 +2454,6 @@ jobs:
       - run: COVERAGE_FILE=/root/project/test-unit-harness-gpu-parallel-pycov make -C harness test-gpu-parallel
       - run: coverage xml -i --data-file=./test-unit-harness-gpu-parallel-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-gpu-parallel
-          env: ci-gpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2601,9 +2478,6 @@ jobs:
       - run: COVERAGE_FILE=/root/project/test-unit-harness-gpu-deepspeed-pycov make -C harness test-gpu-deepspeed
       - run: coverage xml -i --data-file=./test-unit-harness-gpu-deepspeed-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-gpu-deepseed
-          env: ci-gpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2628,9 +2502,6 @@ jobs:
       - run: COVERAGE_FILE=$PWD/test-unit-harness-tf2-pycov make -C harness test-tf2
       - run: coverage xml -i --data-file=./test-unit-harness-tf2-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-harness-tf2
-          env: ci-cpu
       - persist_to_workspace:
           root: .
           paths:
@@ -2656,9 +2527,6 @@ jobs:
       - run: COVERAGE_FILE=$PWD/test-unit-storage-pycov coverage run -m pytest -v --durations=0 --junitxml=/tmp/test-results/test-unit-storage.xml --require-secrets -m cloud harness/tests
       - run: coverage xml -i --data-file=./test-unit-storage-pycov
       - run: codecov -v -t $CODECOV_TOKEN -F harness
-      - upload-junit-datadog:
-          service: test-unit-storage
-          env: ci-cpu
       - persist_to_workspace:
           root: .
           paths:
diff --git a/e2e_tests/tests/requirements.txt b/e2e_tests/tests/requirements.txt
index 3880a97013a..c9b89f75742 100644
--- a/e2e_tests/tests/requirements.txt
+++ b/e2e_tests/tests/requirements.txt
@@ -23,8 +23,6 @@ ray[default,tune]
 pyarrow
 # Pydantic V2 has changes that break existing ray tests
 pydantic<2
-# DataDog tracing and result upload utility
-ddtrace
 # Selenium for okta testing
 selenium
 # Pexpect for okta testing