diff --git a/.github/actions/test/action.yml b/.github/actions/test/action.yml
new file mode 100644
index 0000000..0ca2a08
--- /dev/null
+++ b/.github/actions/test/action.yml
@@ -0,0 +1,135 @@
+name: 'Test'
+description: 'A GitHub Action that tests this action'
+
+inputs:
+ os:
+ description: operating system, e.g. ubuntu-22.04
+ required: true
+ python-version:
+ description: Python version, e.g. 3.11
+ required: true
+
+runs:
+ using: 'composite'
+ steps:
+ - name: Setup Ubuntu
+ if: startsWith(inputs.os, 'ubuntu')
+ run: |
+ sudo apt-get update
+ sudo apt-get install language-pack-en language-pack-de
+ shell: bash
+
+ - name: Setup Python
+ if: inputs.python-version != 'installed'
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ inputs.python-version }}
+
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Detect OS
+ id: os
+ env:
+ OS: ${{ inputs.os }}
+ run: |
+ case "$OS" in
+ ubuntu*)
+ echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT
+ ;;
+ macos*)
+ echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT
+ ;;
+ windows*)
+ echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT
+ ;;
+ esac
+ echo "date=$(date +%Y%m%d 2> /dev/null || true)" >> $GITHUB_OUTPUT
+ shell: bash
+
+ - name: Cache PIP Packages
+ uses: actions/cache@v3
+ id: cache
+ with:
+ path: ${{ steps.os.outputs.pip-cache }}
+ key: ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-${{ steps.os.outputs.date }}
+ restore-keys: |
+ ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-${{ hashFiles('**/requirements.txt', '**/constraints.txt') }}-
+ ${{ inputs.os }}-pip-test-${{ inputs.python-version }}-
+ ${{ inputs.os }}-pip-test-
+
+ - name: Install Python dependencies
+ run: |
+ python3 -V
+ python3 -m pip freeze | sort
+ python3 -m pip cache info || true
+ python3 -m pip cache list || true
+ python3 -m pip install --upgrade --force pip wheel
+ python3 -m pip install --force -r python/requirements.txt
+ python3 -m pip install --force -r python/test/requirements.txt -c python/test/constraints.txt
+ python3 -m pip freeze | sort
+ python3 -m pip cache info || true
+ python3 -m pip cache list || true
+ shell: bash
+
+ - name: Update expectation files
+ id: changes
+ continue-on-error: true
+ run: |
+ python/test/files/update_expectations.sh
+ git status
+
+ if ! git diff --exit-code || [[ $(git ls-files -o --exclude-standard | wc -l) -gt 0 ]]
+ then
+ # we only upload the changed files if we can find zip
+ if which zip
+ then
+ (git diff --name-only && git ls-files -o --exclude-standard) | xargs -d "\n" zip changed-expectations.zip
+ exit 1
+ fi
+ fi
+ shell: bash
+ - name: Upload changed expectation files
+ if: steps.changes.outcome == 'failure'
+ uses: actions/upload-artifact@v3
+ with:
+ name: Changed expectations
+ path: changed-expectations.zip
+ if-no-files-found: error
+
+ - name: PyTest
+ env:
+ PYTHONPATH: ..
+ run: |
+ cd python/test
+ python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest.xml
+ shell: bash
+
+ - name: PyTest (EST)
+ env:
+ TZ: US/Eastern
+ LANG: "en_US.UTF-8"
+ PYTHONPATH: ..
+ run: |
+ cd python/test
+ python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-est.xml
+ shell: bash
+
+ - name: PyTest (CET)
+ env:
+ TZ: Europe/Berlin
+ LANG: "de_DE.UTF-8"
+ PYTHONPATH: ..
+ run: |
+ cd python/test
+ python3 -m pytest --capture=tee-sys --continue-on-collection-errors --junit-xml ../../test-results/pytest-cet.xml
+ shell: bash
+
+ - name: Upload Test Results
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test Results (python-${{ inputs.python-version }}, ${{ inputs.os }})
+ path: |
+ test-results/*.xml
+ unit-test-results.json
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..35bd16d
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "monthly"
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/upgrade-pip-packages.sh b/.github/upgrade-pip-packages.sh
new file mode 100755
index 0000000..bfcfddf
--- /dev/null
+++ b/.github/upgrade-pip-packages.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -euo pipefail
+
+base="$(dirname "$0")"
+
+pip install --upgrade --force pip==22.0.0
+pip install --upgrade --upgrade-strategy eager -r "$base/../python/requirements-direct.txt"
+
+pip install pipdeptree
+# pin the full transitive dependency tree of the direct dependencies;
+# sed strips environment markers (";...") and version constraints to get plain package names
+pipdeptree --packages="$(sed -e "s/;.*//" -e "s/=.*//g" "$base/../python/requirements-direct.txt" | paste -s -d ,)" --freeze > "$base/../python/requirements.txt"
+
+git diff "$base/../python/requirements.txt"
+
diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml
new file mode 100644
index 0000000..c8dd108
--- /dev/null
+++ b/.github/workflows/ci-cd.yml
@@ -0,0 +1,165 @@
+name: CI/CD
+
+on:
+ push:
+ branches:
+ - 'master*'
+ - 'devel-*'
+ tags:
+ - '*'
+ pull_request:
+ schedule:
+ - cron: '0 16 * * *'
+ workflow_dispatch:
+permissions: {}
+
+jobs:
+ dependencies:
+ name: Test python/requirements.txt
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Check requirements.txt against requirements-direct.txt
+ run: |
+ (diff -w python/requirements-direct.txt python/requirements.txt || true) | (! grep -e "^<")
+ shell: bash
+ - name: Check for dependency updates
+ continue-on-error: true
+ run:
+ .github/upgrade-pip-packages.sh
+ shell: bash
+
+ test-mac:
+ name: "Test macOS"
+ uses: "./.github/workflows/test-os.yml"
+ with:
+ os: '["macos-11", "macos-12", "macos-13"]'
+
+ test-lnx:
+ name: "Test Ubuntu"
+ uses: "./.github/workflows/test-os.yml"
+ with:
+ os: '["ubuntu-20.04", "ubuntu-22.04"]'
+
+ test-win:
+ name: "Test Windows"
+ uses: "./.github/workflows/test-os.yml"
+ with:
+ os: '["windows-2019", "windows-2022"]'
+
+ publish:
+ name: "Publish"
+ needs: [test-mac, test-lnx, test-win]
+ # we run the action from this branch whenever we can (when it runs in our repo's context)
+ if: >
+ ! cancelled() &&
+ github.event.sender.login != 'dependabot[bot]' &&
+ ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository )
+ uses: "./.github/workflows/publish.yml"
+ permissions:
+ checks: write
+ pull-requests: write
+ security-events: write
+
+ config-deploy:
+ name: Configure Deployment
+ needs: [test-mac, test-lnx, test-win]
+ # do not build or deploy on forked repositories
+ if: github.repository_owner == 'step-security'
+ runs-on: ubuntu-latest
+ outputs:
+ image: ${{ steps.action.outputs.image }}
+ image-exists: ${{ steps.image.outputs.exists }}
+ image-version: ${{ steps.action.outputs.version }}
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Extract action image and version
+ # we deploy from a specific commit on main (the one that mentions a new version the first time)
+ # so we need to tell docker/metadata-action to extract docker tags from that version
+ id: action
+ run: |
+ image=$(grep -A 10 "^runs:" action.yml | grep -E "^\s+image:\s" | sed -E -e "s/^\s+image:\s*'//" -e "s/docker:\/\///" -e "s/'\s*$//")
+ version=$(cut -d : -f 2 <<< "$image")
+ echo "image=$image" >>$GITHUB_OUTPUT
+ echo "version=$version" >>$GITHUB_OUTPUT
+ shell: bash
+
+ - name: Check action image existence
+ id: image
+ env:
+ DOCKER_CLI_EXPERIMENTAL: enabled
+ run: |
+ if docker manifest inspect '${{ steps.action.outputs.image }}'
+ then
+ echo "exists=true" >>$GITHUB_OUTPUT
+ fi
+ shell: bash
+
+ deploy:
+ name: Deploy to GitHub
+ needs: [publish, config-deploy]
+
+ # do not build or deploy on forked repositories
+ if: github.repository_owner == 'step-security'
+ runs-on: ubuntu-latest
+ permissions:
+ packages: write
+ steps:
+ - name: Docker meta
+ id: docker-meta
+ uses: docker/metadata-action@v4
+ with:
+ images: ghcr.io/step-security/publish-unit-test-result-action
+ flavor: |
+ latest=false
+ prefix=v
+ tags: |
+ type=sha
+ type=ref,event=tag
+ type=semver,pattern={{major}},value=${{ needs.config-deploy.outputs.image-version }}
+ type=semver,pattern={{major}}.{{minor}},value=${{ needs.config-deploy.outputs.image-version }}
+ type=semver,pattern={{version}},value=${{ needs.config-deploy.outputs.image-version }}
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build and push Docker image
+ uses: docker/build-push-action@v4
+ with:
+ tags: ${{ steps.docker-meta.outputs.tags }}
+ labels: ${{ steps.docker-meta.outputs.labels }}
+ platforms: linux/amd64,linux/arm64
+ pull: true
+ # deploy image actions from commits pushed to master and
+ # deploy Dockerfile actions from pushed version tags (no major versions)
+ push: |
+ ${{
+ github.event_name == 'push' && (
+ needs.config-deploy.outputs.image != 'Dockerfile' && startsWith(github.ref, 'refs/heads/master') && needs.config-deploy.outputs.image-exists != 'true' ||
+ needs.config-deploy.outputs.image == 'Dockerfile' && startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.')
+ )
+ }}
+
+ event_file:
+ name: "Event File"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Upload
+ uses: actions/upload-artifact@v3
+ with:
+ name: Event File
+ path: ${{ github.event_path }}
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 0000000..3be9041
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,63 @@
+name: "CodeQL"
+
+on:
+ push:
+ branches:
+ - master
+ - 'devel-*'
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches:
+ - master
+ - 'devel-*'
+ schedule:
+ - cron: '30 15 * * 3'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'python' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
new file mode 100644
index 0000000..0b2b425
--- /dev/null
+++ b/.github/workflows/publish.yml
@@ -0,0 +1,375 @@
+name: Publish
+
+on:
+ workflow_call:
+
+jobs:
+ publish-dockerfile:
+ name: Publish Test Results (Dockerfile)
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Download Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: artifacts
+
+ - name: Prepare publish action from this branch
+ run: |
+ sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml
+ shell: bash
+
+ - name: Publish Test Results
+ id: test-results
+ uses: ./
+ with:
+ check_name: Test Results (Dockerfile)
+ files: "artifacts/**/*.xml"
+ json_file: "tests.json"
+ json_suite_details: true
+ json_test_case_results: true
+ report_suite_logs: "any"
+ log_level: DEBUG
+
+ - name: JSON output
+ uses: ./misc/action/json-output
+ with:
+ json: '${{ steps.test-results.outputs.json }}'
+ json_file: 'tests.json'
+
+ publish-docker-image:
+ name: Publish Test Results (Docker Image ${{ matrix.arch }})
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ pull-requests: write
+ security-events: write
+ strategy:
+ fail-fast: false
+ matrix:
+ arch: [amd64, arm64]
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ with:
+ image: tonistiigi/binfmt:latest
+ platforms: ${{ matrix.arch }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - name: Build Docker image
+ id: build
+ uses: docker/build-push-action@v4
+ with:
+ load: true
+ push: false
+ platforms: linux/${{ matrix.arch }}
+ tags: step-security/publish-unit-test-result-action:latest
+ outputs: type=docker
+
+ - name: Download Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: artifacts
+
+ - name: Publish Test Results
+ id: test-results
+ if: always()
+ env:
+ INPUT_GITHUB_TOKEN: ${{ github.token }}
+ INPUT_CHECK_NAME: Test Results (Docker Image ${{ matrix.arch }})
+ INPUT_FILES: "artifacts/**/*.xml"
+ INPUT_JSON_FILE: "tests.json"
+ INPUT_JSON_SUITE_DETAILS: true
+ INPUT_JSON_TEST_CASE_RESULTS: true
+ INPUT_REPORT_SUITE_LOGS: "any"
+ run: |
+ docker run --platform linux/${{ matrix.arch }} \
+ --workdir $GITHUB_WORKSPACE \
+ --rm \
+ -e "INPUT_CHECK_NAME" \
+ -e "INPUT_JSON_FILE" \
+ -e "INPUT_JSON_SUITE_DETAILS" \
+ -e "INPUT_JSON_TEST_CASE_RESULTS" \
+ -e "INPUT_LOG_LEVEL" \
+ -e "INPUT_ROOT_LOG_LEVEL" \
+ -e "INPUT_GITHUB_TOKEN" \
+ -e "INPUT_GITHUB_TOKEN_ACTOR" \
+ -e "INPUT_GITHUB_RETRIES" \
+ -e "INPUT_COMMIT" \
+ -e "INPUT_COMMENT_TITLE" \
+ -e "INPUT_COMMENT_MODE" \
+ -e "INPUT_FAIL_ON" \
+ -e "INPUT_ACTION_FAIL" \
+ -e "INPUT_ACTION_FAIL_ON_INCONCLUSIVE" \
+ -e "INPUT_FILES" \
+ -e "INPUT_JUNIT_FILES" \
+ -e "INPUT_NUNIT_FILES" \
+ -e "INPUT_XUNIT_FILES" \
+ -e "INPUT_TRX_FILES" \
+ -e "INPUT_TIME_UNIT" \
+ -e "INPUT_TEST_FILE_PREFIX" \
+ -e "INPUT_REPORT_INDIVIDUAL_RUNS" \
+ -e "INPUT_REPORT_SUITE_LOGS" \
+ -e "INPUT_DEDUPLICATE_CLASSES_BY_FILE_NAME" \
+ -e "INPUT_LARGE_FILES" \
+ -e "INPUT_IGNORE_RUNS" \
+ -e "INPUT_JOB_SUMMARY" \
+ -e "INPUT_COMPARE_TO_EARLIER_COMMIT" \
+ -e "INPUT_PULL_REQUEST_BUILD" \
+ -e "INPUT_EVENT_FILE" \
+ -e "INPUT_EVENT_NAME" \
+ -e "INPUT_TEST_CHANGES_LIMIT" \
+ -e "INPUT_CHECK_RUN_ANNOTATIONS" \
+ -e "INPUT_CHECK_RUN_ANNOTATIONS_BRANCH" \
+ -e "INPUT_SECONDS_BETWEEN_GITHUB_READS" \
+ -e "INPUT_SECONDS_BETWEEN_GITHUB_WRITES" \
+ -e "INPUT_SECONDARY_RATE_LIMIT_WAIT_SECONDS" \
+ -e "INPUT_JSON_THOUSANDS_SEPARATOR" \
+ -e "INPUT_SEARCH_PULL_REQUESTS" \
+ -e "HOME" \
+ -e "GITHUB_JOB" \
+ -e "GITHUB_REF" \
+ -e "GITHUB_SHA" \
+ -e "GITHUB_REPOSITORY" \
+ -e "GITHUB_REPOSITORY_OWNER" \
+ -e "GITHUB_RUN_ID" \
+ -e "GITHUB_RUN_NUMBER" \
+ -e "GITHUB_RETENTION_DAYS" \
+ -e "GITHUB_RUN_ATTEMPT" \
+ -e "GITHUB_ACTOR" \
+ -e "GITHUB_TRIGGERING_ACTOR" \
+ -e "GITHUB_WORKFLOW" \
+ -e "GITHUB_HEAD_REF" \
+ -e "GITHUB_BASE_REF" \
+ -e "GITHUB_EVENT_NAME" \
+ -e "GITHUB_SERVER_URL" \
+ -e "GITHUB_API_URL" \
+ -e "GITHUB_GRAPHQL_URL" \
+ -e "GITHUB_REF_NAME" \
+ -e "GITHUB_REF_PROTECTED" \
+ -e "GITHUB_REF_TYPE" \
+ -e "GITHUB_WORKSPACE" \
+ -e "GITHUB_ACTION" \
+ -e "GITHUB_EVENT_PATH" \
+ -e "GITHUB_ACTION_REPOSITORY" \
+ -e "GITHUB_ACTION_REF" \
+ -e "GITHUB_PATH" \
+ -e "GITHUB_ENV" \
+ -e "GITHUB_STEP_SUMMARY" \
+ -e "GITHUB_STATE" \
+ -e "GITHUB_OUTPUT" \
+ -e "RUNNER_OS" \
+ -e "RUNNER_ARCH" \
+ -e "RUNNER_NAME" \
+ -e "RUNNER_TOOL_CACHE" \
+ -e "RUNNER_TEMP" \
+ -e "RUNNER_WORKSPACE" \
+ -e "ACTIONS_RUNTIME_URL" \
+ -e "ACTIONS_RUNTIME_TOKEN" \
+ -e "ACTIONS_CACHE_URL" \
+ -e GITHUB_ACTIONS=true \
+ -e CI=true \
+ -v "$RUNNER_TEMP":"$RUNNER_TEMP" \
+ -v "/var/run/docker.sock":"/var/run/docker.sock" \
+ -v "/home/runner/work/_temp/_github_home":"/github/home" \
+ -v "/home/runner/work/_temp/_github_workflow":"/github/workflow" \
+ -v "/home/runner/work/_temp/_runner_file_commands":"/github/file_commands" \
+ -v "/home/runner/work/publish-unit-test-result-action/publish-unit-test-result-action":"$GITHUB_WORKSPACE" \
+ step-security/publish-unit-test-result-action:latest
+ shell: bash
+
+ - name: JSON output
+ uses: ./misc/action/json-output
+ with:
+ json: '${{ steps.test-results.outputs.json }}'
+ json_file: 'tests.json'
+
+ - name: Scan for vulnerabilities
+ id: scan
+ uses: crazy-max/ghaction-container-scan@v2
+ with:
+ image: step-security/publish-unit-test-result-action:latest
+ dockerfile: ./Dockerfile
+ annotations: true
+ - name: Upload SARIF artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: SARIF
+ path: ${{ steps.scan.outputs.sarif }}
+ - name: Upload SARIF file
+ if: always() && steps.scan.outputs.sarif != ''
+ uses: github/codeql-action/upload-sarif@v2
+ with:
+ sarif_file: ${{ steps.scan.outputs.sarif }}
+
+ publish-composite:
+ name: Publish Test Results (${{ matrix.os-label }} python ${{ matrix.python }})
+ runs-on: ${{ matrix.os }}
+ permissions:
+ checks: write
+ pull-requests: write
+
+ strategy:
+ fail-fast: false
+ max-parallel: 3
+ matrix:
+ # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources
+ # test *-latest and newer (because newer eventually become 'latest' and should be tested to work before that)
+ include:
+ - os: macos-latest
+ os-label: macOS
+ python: "3.8"
+ - os: macos-latest
+ os-label: macOS
+ python: "installed"
+ - os: macos-11
+ os-label: macOS 11
+ python: "installed"
+
+ - os: ubuntu-latest
+ os-label: Linux
+ python: "3.8"
+ - os: ubuntu-latest
+ os-label: Linux
+ python: "installed"
+ - os: ubuntu-20.04
+ os-label: Linux 20.04
+ python: "installed"
+
+ - os: windows-latest
+ os-label: Windows
+ python: "installed"
+ - os: windows-2019
+ os-label: Windows 2019
+ python: "installed"
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Setup Python
+ if: matrix.python != 'installed'
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Download Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: artifacts
+
+ - name: Publish Test Results
+ id: test-results
+ uses: ./composite
+ with:
+ check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }})
+ files: |
+ artifacts/**/*.xml
+ artifacts\**\*.xml
+ json_file: "tests.json"
+ json_suite_details: true
+ json_test_case_results: true
+ report_suite_logs: "any"
+
+ - name: JSON output
+ uses: ./misc/action/json-output
+ with:
+ json: '${{ steps.test-results.outputs.json }}'
+ json_file: 'tests.json'
+
+ publish-test-files:
+ name: Publish Test Files
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Copy test result files
+ run: cp -rv python/test/files test-files
+ shell: bash
+
+ - name: Prepare publish action from this branch
+ run: |
+ sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml
+ shell: bash
+
+ - name: Publish Test Results
+ id: test-results
+ uses: ./
+ with:
+ check_name: Test Results (Test Files)
+ fail_on: nothing
+ files: |
+ test-files/**/*.xml
+ test-files/**/*.trx
+ test-files/**/*.json
+ junit_files: "test-files/junit-xml/**/*.xml"
+ nunit_files: "test-files/nunit/**/*.xml"
+ xunit_files: "test-files/xunit/**/*.xml"
+ trx_files: "test-files/trx/**/*.trx"
+ json_file: "tests.json"
+ json_suite_details: true
+ json_test_case_results: true
+ report_suite_logs: "any"
+ log_level: DEBUG
+
+ - name: JSON output
+ uses: ./misc/action/json-output
+ with:
+ json: '${{ steps.test-results.outputs.json }}'
+ json_file: 'tests.json'
+
+ publish-test-file:
+ name: Publish Test File
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Copy test junit xml files
+ run: cp -rv python/test/files/junit-xml test-files
+ shell: bash
+
+ - name: Prepare publish action from this branch
+ run: |
+ sed --in-place "s/image: .*/image: 'Dockerfile'/" action.yml
+ shell: bash
+
+ - name: Publish Test Results
+ id: test-results
+ uses: ./
+ with:
+ check_name: Test Results (Test File)
+ fail_on: nothing
+ files: "test-files/pytest/junit.gloo.standalone.xml"
+ json_file: "tests.json"
+ json_suite_details: true
+ json_test_case_results: true
+ report_suite_logs: "any"
+ log_level: DEBUG
+
+ - name: JSON output
+ uses: ./misc/action/json-output
+ with:
+ json: '${{ steps.test-results.outputs.json }}'
+ json_file: 'tests.json'
diff --git a/.github/workflows/test-os.yml b/.github/workflows/test-os.yml
new file mode 100644
index 0000000..9f90a0c
--- /dev/null
+++ b/.github/workflows/test-os.yml
@@ -0,0 +1,31 @@
+name: Test OS
+
+on:
+ workflow_call:
+ inputs:
+ os:
+ required: true
+ type: string
+jobs:
+ test:
+ name: Test (python-${{ matrix.python-version }}, ${{ matrix.os }})
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: ${{ fromJson(inputs.os) }}
+ python-version: ["3.8", "3.9", "3.10", "3.11", "3.12.0-rc.3", "installed"]
+
+ include:
+ - os: ${{ fromJson(inputs.os)[0] }}
+ python-version: "3.7"
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Test
+ uses: ./.github/actions/test
+ with:
+ os: ${{ matrix.os }}
+ python-version: ${{ matrix.python-version }}
diff --git a/.github/workflows/test-results.yml b/.github/workflows/test-results.yml
new file mode 100644
index 0000000..4f8d219
--- /dev/null
+++ b/.github/workflows/test-results.yml
@@ -0,0 +1,71 @@
+name: Test Results (reference)
+
+on:
+ workflow_run:
+ workflows: ["CI/CD"]
+ types:
+ - completed
+permissions: {}
+
+jobs:
+ test-results:
+ name: Test Results (reference)
+ if: github.event.workflow_run.conclusion != 'skipped'
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+ pull-requests: write
+
+ steps:
+ - name: Download and Extract Artifacts
+ uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615
+ with:
+ run_id: ${{ github.event.workflow_run.id }}
+ path: artifacts
+
+ - name: Publish Test Results
+ id: test-results
+ uses: step-security/publish-unit-test-result-action/composite@main
+ with:
+ commit: ${{ github.event.workflow_run.head_sha }}
+ check_name: Test Results (reference)
+ event_file: artifacts/Event File/event.json
+ event_name: ${{ github.event.workflow_run.event }}
+ check_run_annotations_branch: "master, master-1.x, devel-1.0, devel-2.0"
+ files: "artifacts/**/*.xml"
+ log_level: DEBUG
+
+ - name: Set badge color
+ if: github.event.workflow_run.event != 'schedule'
+ run: |
+ case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in
+ success)
+ echo "BADGE_COLOR=31c653" >> $GITHUB_ENV
+ ;;
+ failure)
+ echo "BADGE_COLOR=800000" >> $GITHUB_ENV
+ ;;
+ neutral)
+ echo "BADGE_COLOR=696969" >> $GITHUB_ENV
+ ;;
+ esac
+ shell: bash
+
+ - name: Create badge
+ if: github.event.workflow_run.event != 'schedule'
+ uses: emibcn/badge-action@4209421db54c8764d8932070ffd0f81715a629bf
+ with:
+ label: Tests
+ status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}'
+ color: ${{ env.BADGE_COLOR }}
+ path: tests.svg
+
+ - name: Upload badge to Gist
+ # Upload only for master branch and not for scheduled event
+ if: >
+ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'master' && github.event.workflow_run.event != 'schedule'
+ uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d
+ with:
+ token: ${{ secrets.GIST_TOKEN }}
+ gistURL: https://gist.githubusercontent.com/step-security/612cb538c14731f1a8fefe504f519395
+ file: tests.svg
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..e70ac1d
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.8-alpine
+
+LABEL repository="https://github.com/step-security/publish-unit-test-result-action"
+LABEL homepage="https://github.com/step-security/publish-unit-test-result-action"
+
+LABEL com.github.actions.name="Publish Test Results"
+LABEL com.github.actions.description="A GitHub Action to publish test results."
+
+RUN apk add --no-cache --upgrade expat libuuid
+
+COPY python/requirements.txt /action/
+RUN apk add --no-cache build-base libffi-dev; \
+ pip install --upgrade --force --no-cache-dir pip && \
+ pip install --upgrade --force --no-cache-dir -r /action/requirements.txt; \
+ apk del build-base libffi-dev
+
+COPY python/publish /action/publish
+COPY python/publish_test_results.py /action/
+
+ENTRYPOINT ["python", "/action/publish_test_results.py"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1ea41e8
--- /dev/null
+++ b/README.md
@@ -0,0 +1,806 @@
+# GitHub Action to Publish Test Results
+
+![Arm badge](misc/badge-arm.svg)
+![Ubuntu badge](misc/badge-ubuntu.svg)
+![macOS badge](misc/badge-macos.svg)
+![Windows badge](misc/badge-windows.svg)
+![XML badge](misc/badge-xml.svg)
+![TRX badge](misc/badge-trx.svg)
+![JS badge](misc/badge-js.svg)
+
+This [GitHub Action](https://github.com/actions) analyses test result files and
+publishes the results on GitHub. It supports [JSON (Dart, Mocha), TRX (MSTest, VS) and XML (JUnit, NUnit, XUnit) file formats](#generating-test-result-files),
+and runs on Linux, macOS and Windows.
+
+You can use this action with ![Ubuntu Linux](misc/badge-ubuntu.svg) runners (e.g. `runs-on: ubuntu-latest`)
+or ![ARM Linux](misc/badge-arm.svg) self-hosted runners:
+
+```yaml
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ if: always()
+ with:
+ files: |
+ test-results/**/*.xml
+ test-results/**/*.trx
+ test-results/**/*.json
+```
+
+See the [notes on running this action with absolute paths](#running-with-absolute-paths) if you cannot use relative test result file paths.
+
+Use this for ![macOS](misc/badge-macos.svg) (e.g. `runs-on: macos-latest`)
+and ![Windows](misc/badge-windows.svg) (e.g. `runs-on: windows-latest`) runners:
+
+```yaml
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action/composite@v1
+ if: always()
+ with:
+ files: |
+ test-results/**/*.xml
+ test-results/**/*.trx
+ test-results/**/*.json
+```
+
+See the [notes on running this action as a composite action](#running-as-a-composite-action) if you run it on Windows or macOS.
+
+If you see the `"Resource not accessible by integration"` error, you have to grant additional [permissions](#permissions), or
+[set up support for pull requests from fork repositories and branches created by Dependabot](#support-fork-repositories-and-dependabot-branches).
+
+The `if: always()` clause guarantees that this action always runs, even if earlier steps (e.g., the test step) in your workflow fail.
+
+When run multiple times in one workflow, the [option](#configuration) `check_name` has to be set to a unique value for each instance.
+Otherwise, the runs overwrite each other's results.
+
+***Note:** By default, this action does not fail if tests fail. This can be [configured](#configuration) via `action_fail`.
+The step that runs the tests should fail on test failure. The published results, however, indicate failure if tests fail or errors occur,
+which can be [configured](#configuration) via `fail_on`.*
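+
+For example, a minimal sketch that also fails the workflow step itself when tests fail:
+
+```yaml
+- name: Publish Test Results
+  uses: step-security/publish-unit-test-result-action@v1
+  if: always()
+  with:
+    files: "test-results/**/*.xml"
+    fail_on: "test failures"  # check run fails on test failures and errors (default)
+    action_fail: true         # additionally fail this action step itself
+```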
+
+## Permissions
+
+Minimal [workflow job permissions](https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs#example-setting-permissions-for-a-specific-job)
+required by this action in **public** GitHub repositories are:
+
+```yaml
+permissions:
+ checks: write
+ pull-requests: write
+```
+
+The following permissions are required in **private** GitHub repos:
+
+```yaml
+permissions:
+ contents: read
+ issues: read
+ checks: write
+ pull-requests: write
+```
+
+With `comment_mode: off`, the `pull-requests: write` permission is not needed.
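+
+For example, a sketch of a publish job in a public repository that disables pull request comments and therefore only needs `checks: write`:
+
+```yaml
+jobs:
+  publish-test-results:
+    runs-on: ubuntu-latest
+    permissions:
+      checks: write
+    steps:
+      - name: Publish Test Results
+        uses: step-security/publish-unit-test-result-action@v1
+        if: always()
+        with:
+          comment_mode: "off"  # quoted to be safe with YAML boolean parsing
+          files: "test-results/**/*.xml"
+```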
+
+## Generating test result files
+
+Supported test result files can be generated by many test environments. Here is a small overview, which is by no means complete.
+Check your favorite development and test environment for its support of JSON or TRX files, or JUnit, NUnit, or XUnit XML files.
+
+|Test Environment|Language|JUnit<br/>XML|NUnit<br/>XML|XUnit<br/>XML|TRX<br/>file|JSON<br/>file|
+|-----------------|:------:|:---------:|:---------:|:---------:|:---:|:---:|
+|[Dart](https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md)|Dart, Flutter| | | | | :heavy_check_mark: |
+|[Jest](https://jestjs.io/docs/configuration#default-reporter)|JavaScript|:heavy_check_mark:| | | | |
+|[Maven](https://maven.apache.org/surefire/maven-surefire-plugin/examples/junit.html)|Java, Scala, Kotlin|:heavy_check_mark:| | | | |
+|[Mocha](https://mochajs.org/#xunit)|JavaScript|:heavy_check_mark:| |[not xunit](https://github.com/mochajs/mocha/issues/4758)| | :heavy_check_mark: |
+|[MStest / dotnet](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)|.Net|[:heavy_check_mark:](https://github.com/spekt/junit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/nunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/spekt/xunit.testlogger#usage)|[:heavy_check_mark:](https://github.com/Microsoft/vstest-docs/blob/main/docs/report.md#syntax-of-default-loggers)| |
+|[pytest](https://docs.pytest.org/en/latest/how-to/output.html#creating-junitxml-format-files)|Python|:heavy_check_mark:| | | | |
+|[sbt](https://www.scala-sbt.org/release/docs/Testing.html#Test+Reports)|Scala|:heavy_check_mark:| | | | |
+|Your favorite<br/>environment|Your favorite<br/>language|probably<br/>:heavy_check_mark:| | | | |
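+
+For example, pytest writes a JUnit XML file when given `--junit-xml` (the same command used in the [matrix example](#use-with-matrix-strategy) below):
+
+```yaml
+- name: PyTest
+  run: python -m pytest test --junit-xml test-results/pytest.xml
+```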
+
+## What is new in version 2
+
+
+These changes have to be considered when moving from version 1 to version 2:
+
+### Default value for `check_name` changed
+Unless `check_name` is set in your config, the check name used to publish test results changes from `"Unit Test Results"` to `"Test Results"`.
+
+**Impact:**
+The check with the old name will not be updated once moved to version 2.
+
+**Workaround to get version 1 behaviour:**
+Add `check_name: "Unit Test Results"` to your config.
+
+### Default value for `comment_title` changed
+Unless `comment_title` or `check_name` are set in your config, the title used to comment on open pull requests changes from `"Unit Test Results"` to `"Test Results"`.
+
+**Impact:**
+Existing comments with the old title will not be updated once moved to version 2, but a new comment is created.
+
+**Workaround to get version 1 behaviour:**
+See workaround for `check_name`.
+
+### Modes `create new` and `update last` removed for option `comment_mode`
+The action always updates an earlier pull request comment, which is the exact behaviour of mode `update last`.
+The [configuration](#configuration) options `create new` and `update last` are therefore removed.
+
+**Impact:**
+An existing pull request comment is always updated.
+
+**Workaround to get version 1 behaviour:**
+Not supported.
+
+### Option `hiding_comments` removed
+The action always updates an earlier pull request comment, so hiding comments is not required anymore.
+
+### Option `comment_on_pr` removed
+Option `comment_on_pr` has been removed.
+
+**Workaround to get version 1 behaviour:**
+Set `comment_mode` to `always` (the default) or `off`.
+
+## Publishing test results
+
+Test results are published on GitHub in various ([configurable](#configuration)) places:
+
+- as [a comment](#pull-request-comment) in related pull requests
+- as [a check](#commit-and-pull-request-checks) in the checks section of a commit and related pull requests
+- as [annotations](#commit-and-pull-request-annotations) in the checks section and changed files section of a commit and related pull requests
+- as [a job summary](#github-actions-job-summary) of the GitHub Actions workflow
+- as [a check summary](#github-actions-check-summary-of-a-commit) in the GitHub Actions section of the commit
+
+### Pull request comment
+
+A comment is posted on pull requests related to the commit.
+
+![pull request comment example](misc/github-pull-request-comment.png)
+
+In the presence of failures or errors, the comment links to the respective [check summary](#github-actions-check-summary-of-a-commit) with failure details.
+
+Subsequent runs of the action will update this comment. You can access earlier results in the comment edit history:
+
+![pull request comment history example](misc/github-pull-request-comment-update-history.png)
+
+The result distinguishes between tests and runs. In some situations, tests run multiple times,
+e.g. in different environments. Displaying the number of runs allows spotting unexpected
+changes in the number of runs as well.
+
+When tests run only a single time, no run information is displayed. Results are then shown differently:
+
+![pull request comment example without runs](misc/github-pull-request-comment-without-runs.png)
+
+The change statistics (e.g. 5 tests ±0) might sometimes hide test removal.
+Such removals are highlighted in pull request comments so that unintended test removal is easy to spot:
+
+![pull request comment example with test changes](misc/github-pull-request-comment-with-test-changes.png)
+
+***Note:** This requires `check_run_annotations` to be set to `all tests, skipped tests`.*
+
+### Commit and pull request checks
+
+The checks section of a commit and of related pull requests lists a short summary (here `1 fail, 1 skipped, …`)
+and a link to the [check summary](#github-actions-check-summary-of-a-commit) in the GitHub Actions section (here `Details`):
+
+Commit checks:
+
+![commit checks example](misc/github-checks-commit.png)
+
+Pull request checks:
+
+![pull request checks example](misc/github-pull-request-checks.png)
+
+### Commit and pull request annotations
+
+Each failing test produces an annotation with failure details in the checks section of a commit:
+
+![annotations example check](misc/github-checks-annotation.png)
+
+and the changed files section of related pull requests:
+
+![annotations example changed files](misc/github-pull-request-changes-annotation.png)
+
+***Note:** Annotations for test files are only supported when test file paths in test result files are relative to the repository root.
+Use the `test_file_prefix` option to add a prefix to, or remove a prefix from, these file paths. See the [Configuration](#configuration) section for details.*
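+
+For example, a sketch that strips an illustrative absolute prefix from reported test file paths:
+
+```yaml
+with:
+  files: "test-results/**/*.xml"
+  # "-" removes the prefix; "+" would add one (the path is illustrative)
+  test_file_prefix: "-/opt/actions-runner"
+```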
+
+***Note:** Only the first failure of a test is shown. If you want to see all failures, set `report_individual_runs: "true"`.*
+
+### GitHub Actions job summary
+
+The results are added to the job summary page of the workflow that runs this action:
+
+![job summary example](misc/github-job-summary-full.png)
+
+In the presence of failures or errors, the job summary links to the respective [check summary](#github-actions-check-summary-of-a-commit) with failure details.
+
+***Note:** Job summary requires [GitHub Actions runner v2.288.0](https://github.com/actions/runner/releases/tag/v2.288.0) or above.*
+
+### GitHub Actions check summary of a commit
+
+Test results are published in the GitHub Actions check summary of the respective commit:
+
+![checks comment example](misc/github-checks-comment.png)
+
+## The symbols
+[comment]: <> (This heading is linked to from method get_link_and_tooltip_label_md)
+
+The symbols have the following meaning:
+
+|Symbol|Meaning|
+|:----:|-------|
+|:heavy_check_mark:|A successful test or run|
+|:zzz:|A skipped test or run|
+|:x:|A failed test or run|
+|:fire:|An erroneous test or run|
+|:stopwatch:|The duration of all tests or runs|
+
+***Note:*** For simplicity, "disabled" tests count towards "skipped" tests.
+
+## Configuration
+
+Files can be selected via the `files` option. It supports [glob wildcards](https://docs.python.org/3/library/glob.html#glob.glob)
+like `*`, `**`, `?`, and `[]` character ranges. The `**` wildcard matches all files and directories recursively: `./`, `./*/`, `./*/*/`, etc.
+
+You can provide multiple file patterns, one pattern per line. Patterns starting with `!` exclude the matching files.
+There has to be at least one pattern starting without a `!`:
+
+```yaml
+with:
+ files: |
+ *.xml
+ !config.xml
+```
+
+The list of most notable options:
+
+|Option|Default Value|Description|
+|:-----|:-----:|:----------|
+|`files`|_no default_|File patterns of test result files. Relative paths are known to work best, while the composite action [also works with absolute paths](#running-with-absolute-paths). Supports `*`, `**`, `?`, and `[]` character ranges. Use a multiline string for multiple patterns. Patterns starting with `!` exclude the matching files. There has to be at least one pattern starting without a `!`.|
+|`check_name`|`"Test Results"`|An alternative name for the check result. Required to be unique for each instance in one workflow.|
+|`comment_title`|same as `check_name`|An alternative name for the pull request comment.|
+|`comment_mode`|`always`|The action posts comments to pull requests that are associated with the commit. Set to:<br/>`always` - always comment<br/>`changes` - comment when changes w.r.t. the target branch exist<br/>`changes in failures` - when changes in the number of failures and errors exist<br/>`changes in errors` - when changes in the number of (only) errors exist<br/>`failures` - when failures or errors exist<br/>`errors` - when (only) errors exist<br/>`off` - to not create pull request comments.|
+|`large_files`|`false` unless<br/>`ignore_runs` is `true`|Support for large files is enabled when set to `true`. Defaults to `false`, unless `ignore_runs` is `true`.|
+|`ignore_runs`|`false`|Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.|
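+
+A sketch combining several of these options (names and values below are illustrative):
+
+```yaml
+- name: Publish Test Results
+  uses: step-security/publish-unit-test-result-action@v1
+  if: always()
+  with:
+    check_name: "Integration Test Results"
+    comment_mode: failures   # only comment when failures or errors exist
+    large_files: true
+    files: |
+      test-results/**/*.xml
+      !test-results/**/ignored-*.xml
+```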
+
+
+**Options related to Git and GitHub**
+
+|Option|Default Value|Description|
+|:-----|:-----:|:----------|
+|`commit`|`${{env.GITHUB_SHA}}`|An alternative commit SHA to which test results are published. The `push` and `pull_request` events are handled, but for other [workflow events](https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows#push) `GITHUB_SHA` may refer to different kinds of commits. See [GitHub Workflow documentation](https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows) for details.|
+|`github_token`|`${{github.token}}`|An alternative GitHub token, other than the default provided by GitHub Actions runner.|
+|`github_token_actor`|`github-actions`|The name of the GitHub app that owns the GitHub API Access Token (see github_token). Used to identify pull request comments created by this action during earlier runs. Has to be set when `github_token` is set to a GitHub app installation token (other than GitHub actions). Otherwise, existing comments will not be updated, but new comments created. Note: this does not change the bot name of the pull request comments.|
+|`github_retries`|`10`|Requests to the GitHub API are retried this number of times. The value must be a positive integer or zero.|
+|`seconds_between_github_reads`|`0.25`|Sets the number of seconds the action waits between concurrent read requests to the GitHub API.|
+|`seconds_between_github_writes`|`2.0`|Sets the number of seconds the action waits between concurrent write requests to the GitHub API.|
+|`secondary_rate_limit_wait_seconds`|`60.0`|Sets the number of seconds to wait before retrying secondary rate limit errors. If not set, the default defined in the PyGithub library is used (currently 60 seconds).|
+|`pull_request_build`|`"merge"`|As part of pull requests, GitHub builds a merge commit, which combines the commit and the target branch. If tests ran on the actual pushed commit, then set this to `"commit"`.|
+|`event_file`|`${{env.GITHUB_EVENT_PATH}}`|An alternative event file to use. Useful to replace a `workflow_run` event file with the actual source event file.|
+|`event_name`|`${{env.GITHUB_EVENT_NAME}}`|An alternative event name to use. Useful to replace a `workflow_run` event name with the actual source event name: `${{ github.event.workflow_run.event }}`.|
+|`search_pull_requests`|`false`|Prior to v2.6.0, the action used the `/search/issues` REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to `false`.|
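+
+For example, a sketch publishing with a GitHub app installation token; `steps.app-token.outputs.token` and the app name are illustrative and assume a preceding token-generating step:
+
+```yaml
+- name: Publish Test Results
+  uses: step-security/publish-unit-test-result-action@v1
+  if: always()
+  with:
+    github_token: ${{ steps.app-token.outputs.token }}
+    # name of the app, so comments from earlier runs are found and updated
+    github_token_actor: my-github-app
+    files: "test-results/**/*.xml"
+```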
+
+
+**Options related to reporting test results**
+
+|Option|Default Value|Description|
+|:-----|:-----:|:----------|
+|`time_unit`|`seconds`|Time values in the test result files have this unit. Supports `seconds` and `milliseconds`.|
+|`test_file_prefix`|`none`|Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or removed from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".|
+|`job_summary`|`true`|Set to `true`, the results are published as part of the [job summary page](https://github.blog/2022-05-09-supercharging-github-actions-with-job-summaries/) of the workflow run.|
+|`compare_to_earlier_commit`|`true`|Test results are compared to results of earlier commits to show changes:<br/>`false` - disable comparison, `true` - compare across commits.|
+|`test_changes_limit`|`10`|Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of `0`.|
+|`report_individual_runs`|`false`|Individual runs of the same test may see different failures. Reports all individual failures when set `true`, and the first failure only otherwise.|
+|`report_suite_logs`|`none`|In addition to reporting regular test logs, also report test suite logs. These are logs provided on suite level, not individual test level. Set to `info` for normal output, `error` for error output, `any` for both, or `none` for no suite logs at all. Defaults to `none`.|
+|`deduplicate_classes_by_file_name`|`false`|De-duplicates classes with same name by their file name when set `true`, combines test results for those classes otherwise.|
+|`check_run_annotations`|`all tests, skipped tests`|Adds additional information to the check run. This is a comma-separated list of any of the following values:<br/>`all tests` - list all found tests<br/>`skipped tests` - list all skipped tests<br/>Set to `none` to add no extra annotations at all.|
+|`check_run_annotations_branch`|`event.repository.default_branch` or `"main, master"`|Adds check run annotations only on given branches. If not given, this defaults to the default branch of your repository, e.g. `main` or `master`. Comma separated list of branch names allowed, asterisk `"*"` matches all branches. Example: `main, master, branch_one`.|
+|`json_file`|no file|Results are written to this JSON file.|
+|`json_thousands_separator`|`" "`|Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).|
+|`json_suite_details`|`false`|Write out all suite details to the JSON file. Setting this to `true` can greatly increase the size of the output. Defaults to `false`.|
+|`json_test_case_results`|`false`|Write out all individual test case results to the JSON file. Setting this to `true` can greatly increase the size of the output. Defaults to `false`.|
+|`fail_on`|`"test failures"`|Configures the state of the created test result check run. With `"test failures"` it fails if any test fails or test errors occur. It never fails when set to `"nothing"`, and fails only on errors when set to `"errors"`.|
+|`action_fail`|`false`|When set `true`, the action itself fails when tests have failed (see `fail_on`).|
+|`action_fail_on_inconclusive`|`false`|When set `true`, the action itself fails when tests are inconclusive (no test results).|
+
+Pull request comments highlight removal of tests or tests that the pull request moves into skip state.
+Those removed or skipped tests are added as a list, which is limited in length by `test_changes_limit`,
+which defaults to `10`. Reporting these tests can be disabled entirely by setting this limit to `0`.
+This feature requires `check_run_annotations` to contain `all tests` in order to detect test addition
+and removal, and `skipped tests` to detect new skipped and un-skipped tests, as well as
+`check_run_annotations_branch` to contain your default branch.
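+
+Put together, a minimal sketch enabling this detection (branch name and limit are illustrative):
+
+```yaml
+with:
+  files: "test-results/**/*.xml"
+  check_run_annotations: "all tests, skipped tests"
+  check_run_annotations_branch: "main"
+  test_changes_limit: 25
+```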
+
+
+## JSON result
+
+The gathered test information is accessible as JSON via the [GitHub Actions steps outputs](https://docs.github.com/en/actions/learn-github-actions/contexts#steps-context) string or a JSON file.
+
+
+**Access JSON via step outputs**
+
+The `json` output of the action can be accessed through the expression `steps.<id>.outputs.json`.
+
+```yaml
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ id: test-results
+ if: always()
+ with:
+ files: "test-results/**/*.xml"
+
+- name: Conclusion
+ run: echo "Conclusion is ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}"
+```
+
+Here is an example JSON:
+```json
+{
+ "title": "4 parse errors, 4 errors, 23 fail, 18 skipped, 227 pass in 39m 12s",
+ "summary": " 24 files ±0 4 errors 21 suites ±0 39m 12s [:stopwatch:](https://github.com/step-security/publish-unit-test-result-action \"duration of all tests\") ±0s\n272 tests ±0 227 [:heavy_check_mark:](https://github.com/step-security/publish-unit-test-result-action \"passed tests\") ±0 18 [:zzz:](https://github.com/step-security/publish-unit-test-result-action \"skipped / disabled tests\") ±0 23 [:x:](https://github.com/step-security/publish-unit-test-result-action \"failed tests\") ±0 4 [:fire:](https://github.com/step-security/publish-unit-test-result-action \"test errors\") ±0 \n437 runs ±0 354 [:heavy_check_mark:](https://github.com/step-security/publish-unit-test-result-action \"passed tests\") ±0 53 [:zzz:](https://github.com/step-security/publish-unit-test-result-action \"skipped / disabled tests\") ±0 25 [:x:](https://github.com/step-security/publish-unit-test-result-action \"failed tests\") ±0 5 [:fire:](https://github.com/step-security/publish-unit-test-result-action \"test errors\") ±0 \n\nResults for commit 11c02e56. ± Comparison against earlier commit d8ce4b6c.\n",
+ "conclusion": "success",
+ "stats": {
+ "files": 24,
+ "errors": 4,
+ "suites": 21,
+ "duration": 2352,
+ "tests": 272,
+ "tests_succ": 227,
+ "tests_skip": 18,
+ "tests_fail": 23,
+ "tests_error": 4,
+ "runs": 437,
+ "runs_succ": 354,
+ "runs_skip": 53,
+ "runs_fail": 25,
+ "runs_error": 5,
+ "commit": "11c02e561e0eb51ee90f1c744c0ca7f306f1f5f9"
+ },
+ "stats_with_delta": {
+ "files": {
+ "number": 24,
+ "delta": 0
+ },
+ …,
+ "commit": "11c02e561e0eb51ee90f1c744c0ca7f306f1f5f9",
+ "reference_type": "earlier",
+ "reference_commit": "d8ce4b6c62ebfafe1890c55bf7ea30058ebf77f2"
+ },
+ "formatted": {
+ "stats": {
+ "duration": "2 352",
+ …
+ },
+ "stats_with_delta": {
+ "duration": {
+ "number": "2 352",
+ "delta": "+12"
+ },
+ …
+ }
+ },
+ "annotations": 31
+}
+```
+
+The `formatted` key provides a copy of `stats` and `stats_with_delta`, where numbers are formatted to strings.
+For example, `"duration": 2352` is formatted as `"duration": "2 352"`. The thousands separator can be configured
+via `json_thousands_separator`. Formatted numbers are especially useful when those values are used where formatting
+is not easily available, e.g. when [creating a badge from test results](#create-a-badge-from-test-results).
+
+**Access JSON via file**
+
+The optional `json_file` option allows you to [configure](#configuration) a file to which extended JSON information is written.
+Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` contain more information
+than just the number of errors and annotations, respectively.
+
+Additionally, `json_test_case_results` can be enabled to add the `cases` field to the JSON file, which provides
+all test results of all tests. Enabling this may greatly increase the output size of the JSON file.
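+
+A sketch that writes this extended JSON to a file:
+
+```yaml
+- name: Publish Test Results
+  uses: step-security/publish-unit-test-result-action@v1
+  if: always()
+  with:
+    files: "test-results/**/*.xml"
+    json_file: "tests.json"
+    json_suite_details: true
+    json_test_case_results: true
+```
+
+The resulting JSON file then contains entries like: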
+
+```json
+{
+ …,
+ "stats": {
+ …,
+ "errors": [
+ {
+ "file": "test-files/empty.xml",
+ "message": "File is empty.",
+ "line": null,
+ "column": null
+ }
+ ],
+ …
+ },
+ …,
+ "annotations": [
+ {
+ "path": "test/test.py",
+ "start_line": 819,
+ "end_line": 819,
+ "annotation_level": "warning",
+ "message": "test-files/junit.fail.xml",
+ "title": "1 out of 3 runs failed: test_events (test.Tests)",
+ "raw_details": "self = \n\n def test_events(self):\n > self.do_test_events(3)\n\n test.py:821:\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n test.py:836: in do_test_events\n self.do_test_rsh(command, 143, events=events)\n test.py:852: in do_test_rsh\n self.assertEqual(expected_result, res)\n E AssertionError: 143 != 0\n "
+ }
+ ],
+ …,
+ "cases": [
+ {
+ "class_name": "test.test_spark_keras.SparkKerasTests",
+ "test_name": "test_batch_generator_fn",
+ "states": {
+ "success": [
+ {
+ "result_file": "test-files/junit-xml/pytest/junit.spark.integration.1.xml",
+ "test_file": "test/test_spark_keras.py",
+ "line": 454,
+ "class_name": "test.test_spark_keras.SparkKerasTests",
+ "test_name": "test_batch_generator_fn",
+ "result": "success",
+ "time": 0.006
+ },
+ {
+ "result_file": "test-files/junit-xml/pytest/junit.spark.integration.2.xml",
+ "test_file": "test/test_spark_keras.py",
+ "line": 454,
+ "class_name": "test.test_spark_keras.SparkKerasTests",
+ "test_name": "test_batch_generator_fn",
+ "result": "success",
+ "time": 0.006
+ }
+ ]
+ }
+ },
+ …
+ ],
+ …
+}
+```
+
+
+
+See [Create a badge from test results](#create-a-badge-from-test-results) for an example on how to create a badge from this JSON.
+
+## Use with matrix strategy
+
+In a scenario where your tests run multiple times in different environments (e.g. a [strategy matrix](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix)),
+the action should run only once over all test results. For this, put the action into a separate job
+that depends on all your test environments. Those need to upload the test results as artifacts, which
+are then all downloaded by your publish job.
+
+
+**Example workflow YAML**
+
+```yaml
+name: CI
+
+on: [push]
+permissions: {}
+
+jobs:
+ build-and-test:
+ name: Build and Test (Python ${{ matrix.python-version }})
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: [3.6, 3.7, 3.8]
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Setup Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: PyTest
+ run: python -m pytest test --junit-xml pytest.xml
+
+ - name: Upload Test Results
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test Results (Python ${{ matrix.python-version }})
+ path: pytest.xml
+
+ publish-test-results:
+ name: "Publish Tests Results"
+ needs: build-and-test
+ runs-on: ubuntu-latest
+ permissions:
+ checks: write
+
+ # only needed unless run with comment_mode: off
+ pull-requests: write
+
+ # only needed for private repository
+ contents: read
+
+ # only needed for private repository
+ issues: read
+ if: always()
+
+ steps:
+ - name: Download Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ path: artifacts
+
+ - name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ with:
+ files: "artifacts/**/*.xml"
+```
+
+
+Please consider [supporting fork repositories and Dependabot branches](#support-fork-repositories-and-dependabot-branches)
+together with your matrix strategy.
+
+## Support fork repositories and dependabot branches
+[comment]: <> (This heading is linked to from main method in publish_unit_test_results.py)
+
+Getting test results of pull requests created by contributors from fork repositories or by
+[Dependabot](https://docs.github.com/en/github/administering-a-repository/keeping-your-dependencies-updated-automatically)
+requires some additional setup. Without this, the action will fail with the
+`"Resource not accessible by integration"` error for those situations.
+
+In this setup, your CI workflow does not need to publish test results anymore as they are **always** published from a separate workflow.
+
+1. Your CI workflow has to upload the GitHub event file and test result files.
+2. Set up an additional workflow on `workflow_run` events, which starts on completion of the CI workflow,
+ downloads the event file and the test result files, and runs this action on them.
+ This workflow publishes the test results for pull requests from fork repositories and dependabot,
+ as well as all "ordinary" runs of your CI workflow.
+
+
+**Step-by-step instructions**
+
+1. Add the following job to your CI workflow to upload the event file as an artifact:
+
+```yaml
+event_file:
+ name: "Event File"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Upload
+ uses: actions/upload-artifact@v3
+ with:
+ name: Event File
+ path: ${{ github.event_path }}
+```
+
+2. Add the following action step to your CI workflow to upload test results as artifacts.
+Adjust the value of `path` to fit your setup:
+
+```yaml
+- name: Upload Test Results
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: Test Results
+ path: |
+ test-results/*.xml
+```
+
+3. If you run tests in a [strategy matrix](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstrategymatrix),
+make the artifact name unique for each job, e.g.:
+```yaml
+ with:
+ name: Test Results (${{ matrix.python-version }})
+ path: …
+```
+
+4. Add the following workflow that publishes test results. It downloads and extracts
+all artifacts into `artifacts/ARTIFACT_NAME/`, where `ARTIFACT_NAME` will be `Test Results`
+when set up as above, or `Test Results (…)` when run in a strategy matrix.
+
+ It then runs the action on files matching `artifacts/**/*.xml`.
+Change the `files` pattern with the path to your test artifacts if it does not work for you.
+The publish action uses the event file of the CI workflow.
+
+ Also adjust the value of `workflows` (here `"CI"`) to fit your setup:
+
+```yaml
+name: Test Results
+
+on:
+ workflow_run:
+ workflows: ["CI"]
+ types:
+ - completed
+permissions: {}
+
+jobs:
+ test-results:
+ name: Test Results
+ runs-on: ubuntu-latest
+ if: github.event.workflow_run.conclusion != 'skipped'
+
+ permissions:
+ checks: write
+
+ # needed unless run with comment_mode: off
+ pull-requests: write
+
+ # only needed for private repository
+ contents: read
+
+ # only needed for private repository
+ issues: read
+
+ # required by download step to access artifacts API
+ actions: read
+
+ steps:
+ - name: Download and Extract Artifacts
+ uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615
+ with:
+ run_id: ${{ github.event.workflow_run.id }}
+ path: artifacts
+
+ - name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ with:
+ commit: ${{ github.event.workflow_run.head_sha }}
+ event_file: artifacts/Event File/event.json
+ event_name: ${{ github.event.workflow_run.event }}
+ files: "artifacts/**/*.xml"
+```
+
+Note: Running this action on `pull_request_target` events is [dangerous if combined with code checkout and code execution](https://securitylab.github.com/research/github-actions-preventing-pwn-requests).
+That event type is therefore intentionally not used here!
+
+
+## Running with multiple event types (pull_request, push, schedule, …)
+
+This action comments on a pull request each time it is executed via any event type.
+When run for more than one event type, runs will overwrite earlier pull request comments.
+
+Note that `pull_request` events may produce different test results than any other event type.
+The `pull_request` event runs the workflow on a merge commit, i.e. the commit merged into the target branch.
+All other event types run on the commit itself.
+
+If you want to distinguish between test results from `pull_request` and `push`, or want to distinguish the original test results
+of the `push` to master from subsequent `schedule` events, you may want to add the following to your workflow.
+
+
+There are two ways to prevent the publish action from overwriting results from other event types:
+
+### Test results per event type
+
+Add the event name to `check_name` to avoid different event types overwriting each other's results:
+
+```yaml
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ if: always()
+ with:
+ check_name: "Test Results (${{ github.event.workflow_run.event || github.event_name }})"
+ files: "test-results/**/*.xml"
+```
+
+### Pull request comments only for pull_request events
+
+Setting the pull request comment mode to `"off"` for all event types other than `pull_request` prevents those event types from overwriting the pull request comments:
+
+```yaml
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ if: always()
+ with:
+ # set comment_mode to "always" for pull_request event, set to "off" for all other event types
+ comment_mode: ${{ (github.event.workflow_run.event == 'pull_request' || github.event_name == 'pull_request') && 'always' || 'off' }}
+ files: "test-results/**/*.xml"
+```
+
+
+## Create a badge from test results
+
+
+Example workflow YAML
+
+```yaml
+steps:
+- …
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ id: test-results
+ if: always()
+ with:
+ files: "test-results/**/*.xml"
+
+- name: Set badge color
+ shell: bash
+ run: |
+ case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in
+ success)
+ echo "BADGE_COLOR=31c653" >> $GITHUB_ENV
+ ;;
+ failure)
+ echo "BADGE_COLOR=800000" >> $GITHUB_ENV
+ ;;
+ neutral)
+ echo "BADGE_COLOR=696969" >> $GITHUB_ENV
+ ;;
+ esac
+
+- name: Create badge
+ uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1
+ with:
+ label: Tests
+ status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}'
+ color: ${{ env.BADGE_COLOR }}
+ path: badge.svg
+
+- name: Upload badge to Gist
+ # Upload only for master branch
+ if: >
+ github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'master' ||
+ github.event_name != 'workflow_run' && github.ref == 'refs/heads/master'
+ uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d
+ with:
+ token: ${{ secrets.GIST_TOKEN }}
+ gistURL: https://gist.githubusercontent.com/{user}/{id}
+ file: badge.svg
+```
+
+You have to create a personal access token (PAT) with only the `gist` scope. Add it to your GitHub Actions secrets; in the above example it is stored under the secret name `GIST_TOKEN`.
+
+Set the `gistURL` to the Gist that you want to write the badge file to, in the form of `https://gist.githubusercontent.com/{user}/{id}`.
+
+You can then use the badge via this URL: https://gist.githubusercontent.com/{user}/{id}/raw/badge.svg
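+
+For example, the badge can be embedded in a README with standard Markdown image syntax:
+`![Tests](https://gist.githubusercontent.com/{user}/{id}/raw/badge.svg)`, with `{user}` and `{id}`
+replaced as above.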
+
+
+## Running with absolute paths
+
+This action works best with relative paths (e.g. `test-results/**/*.xml`),
+while most absolute paths (e.g. `/tmp/test-results/**/*.xml`) require the composite variant
+of this action (`uses: step-security/publish-unit-test-result-action/composite@v1`).
+
+If you have to use absolute paths with the non-composite variant of this action (`uses: step-security/publish-unit-test-result-action@v1`),
+copy the files to a relative path first, and then publish from the relative path:
+
+```yaml
+- name: Copy Test Results
+ if: always()
+ run: |
+ cp -Lpr /tmp/test-results test-results
+ shell: bash
+
+- name: Publish Test Results
+ uses: step-security/publish-unit-test-result-action@v1
+ if: always()
+ with:
+ files: |
+ test-results/**/*.xml
+ test-results/**/*.trx
+ test-results/**/*.json
+```
+
+Using the non-composite variant of this action is recommended as it starts up much quicker.
+
+## Running as a composite action
+
+Running this action as a composite action allows it to run on various operating systems, as it
+does not require Docker. The composite action, however, requires a Python3 environment to be set up
+on the action runner. All GitHub-hosted runners (Ubuntu, Windows Server, and macOS) provide a suitable
+Python3 environment out-of-the-box.
+
+Self-hosted runners may require setting up a Python environment first:
+
+```yaml
+- name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+```
+
+Self-hosted runners for Windows require a Bash shell to be installed. The easiest way to get one is to install
+Git for Windows, which comes with Git Bash. Make sure that the location of `bash.exe` is part of the `PATH`
+environment variable seen by the self-hosted runner, for example as sketched below.
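+
+A minimal sketch of adding Git Bash to the `PATH` for subsequent workflow steps; the install
+location `C:\Program Files\Git\bin` is an assumption and may differ on your runner:
+
+```yaml
+- name: Add Git Bash to PATH (self-hosted Windows)
+ shell: pwsh
+ # append the assumed Git Bash directory to the PATH seen by all subsequent steps
+ run: Add-Content -Path $env:GITHUB_PATH -Value 'C:\Program Files\Git\bin'
+```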
diff --git a/action.yml b/action.yml
new file mode 100644
index 0000000..181f02e
--- /dev/null
+++ b/action.yml
@@ -0,0 +1,150 @@
+name: 'Publish Test Results'
+description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …'
+
+inputs:
+ github_token:
+ description: 'GitHub API Access Token.'
+ default: ${{ github.token }}
+ required: false
+ github_token_actor:
+ description: 'The name of the GitHub app that owns the GitHub API Access Token (see github_token). Used to identify pull request comments created by this action during earlier runs. Has to be set when `github_token` is set to a GitHub app installation token (other than GitHub actions). Otherwise, existing comments will not be updated, but new comments created. Note: this does not change the bot name of the pull request comments. Defaults to "github-actions".'
+ default: 'github-actions'
+ required: false
+ github_retries:
+ description: 'Requests to the GitHub API are retried this number of times. The value must be a positive integer or zero.'
+ default: '10'
+ required: false
+ commit:
+ description: 'Commit SHA to which test results are published. Only needed if the value of GITHUB_SHA does not work for you.'
+ required: false
+ check_name:
+ description: 'Name of the created check run.'
+ default: 'Test Results'
+ required: false
+ comment_title:
+ description: 'An alternative title for the pull request comment. Defaults to value of check_name input.'
+ required: false
+ comment_mode:
+ description: 'The action posts comments to pull requests that are associated with the commit. Set to "always" - always comment, "changes" - comment when changes w.r.t. the target branch exist, "changes in failures" - when changes in the number of failures and errors exist, "changes in errors" - when changes in the number of (only) errors exist, "failures" - when failures or errors exist, "errors" - when (only) errors exist, "off" - to not create pull request comments.'
+ default: 'always'
+ required: false
+ fail_on:
+ description: 'The created test result check run has failure state if any test fails or test errors occur. Never fails when set to "nothing", fails only on errors when set to "errors". Default is "test failures".'
+ default: 'test failures'
+ required: false
+ action_fail:
+ description: 'When set "true", the action itself fails when tests have failed (see option fail_on).'
+ default: 'false'
+ required: false
+ action_fail_on_inconclusive:
+ description: 'When set "true", the action itself fails when tests are inconclusive (no test results).'
+ default: 'false'
+ required: false
+ files:
+ description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There has to be at least one pattern that does not start with "!".'
+ required: false
+ junit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ nunit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ xunit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ trx_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ time_unit:
+ description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".'
+ default: 'seconds'
+ required: false
+ test_file_prefix:
+ description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or removed from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".'
+ required: false
+ report_individual_runs:
+ description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.'
+ required: false
+ report_suite_logs:
+ description: 'In addition to reporting regular test logs, also report test suite logs. These are logs provided on suite level, not individual test level. Set to "info" for normal output, "error" for error output, "any" for both, or "none" for no suite logs at all. Defaults to "none".'
+ default: 'none'
+ required: false
+ deduplicate_classes_by_file_name:
+ description: 'De-duplicates classes with same name by their file name when set "true", combines test results for those classes otherwise.'
+ required: false
+ large_files:
+ description: 'Support for large files is enabled when set to "true". Defaults to "false", unless ignore_runs is "true".'
+ required: false
+ ignore_runs:
+ description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.'
+ default: 'false'
+ required: false
+ job_summary:
+ description: 'Set to "true", the results are published as part of the job summary page of the workflow run.'
+ default: 'true'
+ required: false
+ compare_to_earlier_commit:
+ description: 'Test results are compared to results of earlier commits to highlight changes: "false" - disable comparison, "true" - compare across commits.'
+ default: 'true'
+ required: false
+ pull_request_build:
+ description: 'As part of pull requests, GitHub builds a merge commit, which combines the commit and the target branch. If tests ran on the actual pushed commit, then set this to "commit". Defaults to "merge".'
+ default: 'merge'
+ required: false
+ event_file:
+ description: 'An alternative event file to use. Useful to replace a "workflow_run" event file with the actual source event file.'
+ required: false
+ event_name:
+ description: 'An alternative event name to use. Useful to replace a "workflow_run" event name with the actual source event name: github.event.workflow_run.event.'
+ required: false
+ test_changes_limit:
+ description: 'Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of 0. The default is 10.'
+ required: false
+ check_run_annotations:
+ description: 'Adds additional information to the check run. This is a comma-separated list of any of the following values: "all tests" - list all found tests, "skipped tests" - list all skipped tests. Set to "none" to add no extra annotations at all.'
+ default: 'all tests, skipped tests'
+ required: false
+ check_run_annotations_branch:
+ description: 'Adds check run annotations only on given branches. Comma-separated list of branch names allowed, asterisk "*" matches all branches. Defaults to event.repository.default_branch or "main, master".'
+ required: false
+ seconds_between_github_reads:
+ description: 'Sets the number of seconds the action waits between concurrent read requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.'
+ default: '0.25'
+ required: false
+ seconds_between_github_writes:
+ description: 'Sets the number of seconds the action waits between concurrent write requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.'
+ default: '2.0'
+ required: false
+ secondary_rate_limit_wait_seconds:
+ description: 'Sets the number of seconds to wait before retrying secondary rate limit errors. If not set, the default defined in the PyGithub library is used (currently 60 seconds).'
+ required: false
+ json_file:
+ description: 'Results are written to this JSON file.'
+ required: false
+ json_thousands_separator:
+ description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).'
+ default: ' '
+ required: false
+ json_suite_details:
+ description: 'Write out all suite details to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".'
+ default: 'false'
+ required: false
+ json_test_case_results:
+ description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".'
+ default: 'false'
+ required: false
+ search_pull_requests:
+ description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".'
+ default: 'false'
+ required: false
+outputs:
+ json:
+ description: "Test results as JSON"
+
+runs:
+ using: 'docker'
+ image: 'docker://ghcr.io/step-security.io/publish-unit-test-result-action:v1.0.0'
+
+branding:
+ icon: 'check-square'
+ color: 'green'
\ No newline at end of file
diff --git a/composite/action.yml b/composite/action.yml
new file mode 100644
index 0000000..3164bec
--- /dev/null
+++ b/composite/action.yml
@@ -0,0 +1,313 @@
+name: 'Publish Test Results'
+description: 'Publishes JUnit, NUnit, XUnit, TRX, JSON test results on GitHub for .NET, Dart, Java, JS, Jest, Mocha, Python, Scala, …'
+
+inputs:
+ github_token:
+ description: 'GitHub API Access Token.'
+ default: ${{ github.token }}
+ required: false
+ github_token_actor:
+ description: 'The name of the GitHub app that owns the GitHub API Access Token (see github_token). Used to identify pull request comments created by this action during earlier runs. Has to be set when `github_token` is set to a GitHub app installation token (other than GitHub actions). Otherwise, existing comments will not be updated, but new comments created. Note: this does not change the bot name of the pull request comments. Defaults to "github-actions".'
+ default: 'github-actions'
+ required: false
+ github_retries:
+ description: 'Requests to the GitHub API are retried this number of times. The value must be a positive integer or zero.'
+ default: '10'
+ required: false
+ commit:
+ description: 'Commit SHA to which test results are published. Only needed if the value of GITHUB_SHA does not work for you.'
+ required: false
+ check_name:
+ description: 'Name of the created check run.'
+ default: 'Test Results'
+ required: false
+ comment_title:
+ description: 'An alternative title for the pull request comment. Defaults to value of check_name input.'
+ required: false
+ comment_mode:
+ description: 'The action posts comments to pull requests that are associated with the commit. Set to "always" - always comment, "changes" - comment when changes w.r.t. the target branch exist, "changes in failures" - when changes in the number of failures and errors exist, "changes in errors" - when changes in the number of (only) errors exist, "failures" - when failures or errors exist, "errors" - when (only) errors exist, "off" - to not create pull request comments.'
+ default: 'always'
+ required: false
+ fail_on:
+ description: 'The created test result check run has failure state if any test fails or test errors occur. Never fails when set to "nothing", fails only on errors when set to "errors". Default is "test failures".'
+ default: 'test failures'
+ required: false
+ action_fail:
+ description: 'When set "true", the action itself fails when tests have failed (see option fail_on).'
+ default: 'false'
+ required: false
+ action_fail_on_inconclusive:
+ description: 'When set "true", the action itself fails when tests are inconclusive (no test results).'
+ default: 'false'
+ required: false
+ files:
+ description: 'File patterns of test result files. Relative paths are known to work best, while the composite action also works with absolute paths. Supports "*", "**", "?", and "[]" character ranges. Use multiline string for multiple patterns. Patterns starting with "!" exclude the matching files. There has to be at least one pattern that does not start with "!".'
+ required: false
+ junit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ nunit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ xunit_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ trx_files:
+ description: 'Deprecated, use "files" option instead.'
+ required: false
+ time_unit:
+ description: 'Time values in the test result files have this unit. Supports "seconds" and "milliseconds".'
+ default: 'seconds'
+ required: false
+ test_file_prefix:
+ description: 'Paths in the test result files should be relative to the git repository for annotations to work best. This prefix is added to (if starting with "+"), or removed from (if starting with "-") test file paths. Examples: "+src/" or "-/opt/actions-runner".'
+ required: false
+ report_individual_runs:
+ description: 'Individual runs of the same test may see different failures. Reports all individual failures when set "true" or the first only otherwise.'
+ required: false
+ report_suite_logs:
+ description: 'In addition to reporting regular test logs, also report test suite logs. These are logs provided on suite level, not individual test level. Set to "info" for normal output, "error" for error output, "any" for both, or "none" for no suite logs at all. Defaults to "none".'
+ default: 'none'
+ required: false
+ deduplicate_classes_by_file_name:
+ description: 'De-duplicates classes with same name by their file name when set "true", combines test results for those classes otherwise.'
+ required: false
+ large_files:
+ description: 'Support for large files is enabled when set to "true". Defaults to "false", unless ignore_runs is "true".'
+ required: false
+ ignore_runs:
+ description: 'Does not collect test run information from the test result files, which is useful for very large files. This disables any check run annotations.'
+ default: 'false'
+ required: false
+ job_summary:
+ description: 'Set to "true", the results are published as part of the job summary page of the workflow run.'
+ default: 'true'
+ required: false
+ compare_to_earlier_commit:
+ description: 'Test results are compared to results of earlier commits to highlight changes: "false" - disable comparison, "true" - compare across commits.'
+ default: 'true'
+ required: false
+ pull_request_build:
+ description: 'As part of pull requests, GitHub builds a merge commit, which combines the commit and the target branch. If tests ran on the actual pushed commit, then set this to "commit". Defaults to "merge".'
+ default: 'merge'
+ required: false
+ event_file:
+ description: 'An alternative event file to use. Useful to replace a "workflow_run" event file with the actual source event file.'
+ required: false
+ event_name:
+ description: 'An alternative event name to use. Useful to replace a "workflow_run" event name with the actual source event name: github.event.workflow_run.event.'
+ required: false
+ test_changes_limit:
+ description: 'Limits the number of removed or skipped tests reported on pull request comments. This report can be disabled with a value of 0. The default is 10.'
+ required: false
+ check_run_annotations:
+ description: 'Adds additional information to the check run. This is a comma-separated list of any of the following values: "all tests" - list all found tests, "skipped tests" - list all skipped tests. Set to "none" to add no extra annotations at all.'
+ default: 'all tests, skipped tests'
+ required: false
+ check_run_annotations_branch:
+ description: 'Adds check run annotations only on given branches. Comma-separated list of branch names allowed, asterisk "*" matches all branches. Defaults to event.repository.default_branch or "main, master".'
+ required: false
+ seconds_between_github_reads:
+ description: 'Sets the number of seconds the action waits between concurrent read requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.'
+ default: '0.25'
+ required: false
+ seconds_between_github_writes:
+ description: 'Sets the number of seconds the action waits between concurrent write requests to the GitHub API. This throttles the API usage to avoid abuse rate limits: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#abuse-rate-limits.'
+ default: '2.0'
+ required: false
+ secondary_rate_limit_wait_seconds:
+ description: 'Sets the number of seconds to wait before retrying secondary rate limit errors. If not set, the default defined in the PyGithub library is used (currently 60 seconds).'
+ required: false
+ json_file:
+ description: 'Results are written to this JSON file.'
+ required: false
+ json_thousands_separator:
+ description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).'
+ default: ' '
+ required: false
+ json_suite_details:
+ description: 'Write out all suite details to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".'
+ default: 'false'
+ required: false
+ json_test_case_results:
+ description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".'
+ default: 'false'
+ required: false
+ search_pull_requests:
+ description: 'Prior to v2.6.0, the action used the "/search/issues" REST API to find pull requests related to a commit. If you need to restore that behaviour, set this to "true". Defaults to "false".'
+ default: 'false'
+ required: false
+
+outputs:
+ json:
+ description: "Test results as JSON"
+ value: ${{ steps.test-results.outputs.json }}
+
+runs:
+ using: 'composite'
+ steps:
+ - name: Check for Python3
+ id: python
+ run: |
+ echo '##[group]Check for Python3'
+ # we check version here just to execute `python3` with an argument
+ # on Windows, there is a `python3.exe` that is a proxy to trigger installation from app store
+ # command `which python3` finds that, but `python3 -V` does not return the version on stdout
+ if ! which python3 || [[ $(python3 -V) != *"python 3."* && $(python3 -V) != *"Python 3."* ]]
+ then
+ if ! which python || [[ $(python -V) != *"python 3."* && $(python -V) != *"Python 3."* ]]
+ then
+ echo "::error::No python3 interpreter found. Please setup python before running this action. You could use https://github.com/actions/setup-python."
+ exit 1
+ fi
+
+ interpreter="$(which python)"
+ if [[ ! -e "${interpreter}3" ]]
+ then
+ mkdir -p "$RUNNER_TEMP/bin/"
+ ln -s "$interpreter" "$RUNNER_TEMP/bin/python3"
+ echo "$RUNNER_TEMP/bin" >> $GITHUB_PATH
+ fi
+ fi
+ echo "version=$(python3 -V)" >> $GITHUB_OUTPUT
+ echo '##[endgroup]'
+ shell: bash
+
+ - name: Detect OS
+ id: os
+ run: |
+ case "$RUNNER_OS" in
+ Linux*)
+ echo "pip-cache=~/.cache/pip" >> $GITHUB_OUTPUT
+ ;;
+ macOS*)
+ echo "pip-cache=~/Library/Caches/pip" >> $GITHUB_OUTPUT
+ ;;
+ Windows*)
+ echo "pip-cache=~\\AppData\\Local\\pip\\Cache" >> $GITHUB_OUTPUT
+ echo "pip-options=--user" >> $GITHUB_OUTPUT
+ ;;
+ esac
+ shell: bash
+
+ - name: Restore PIP packages cache
+ uses: actions/cache/restore@v3
+ id: cache
+ continue-on-error: true
+ with:
+ path: ${{ steps.os.outputs.pip-cache }}
+ key: step-security-publish-action-${{ runner.os }}-${{ runner.arch }}-pip-${{ steps.python.outputs.version }}-df386fe4e04a72c96e140f0566a5c849
+
+ - name: Create virtualenv
+ id: venv
+ continue-on-error: true
+ env:
+ PIP_OPTIONS: ${{ steps.os.outputs.pip-options }}
+ run: |
+ echo '##[group]Create virtualenv'
+ # install virtualenv, if it is not yet installed
+ python3 -m pip install $PIP_OPTIONS virtualenv
+ python3 -m virtualenv step-security-publish-action-venv
+ # test activating virtualenv
+ case "$RUNNER_OS" in
+ Linux*|macOS*)
+ source step-security-publish-action-venv/bin/activate;;
+ Windows*)
+ source step-security-publish-action-venv\\Scripts\\activate;;
+ esac
+ which python3
+ echo '##[endgroup]'
+ shell: bash
+
+ - name: Install Python dependencies
+ env:
+ PIP_OPTIONS: ${{ steps.os.outputs.pip-options }}
+ run: |
+ echo '##[group]Install Python dependencies'
+ if [ "${{ steps.venv.outcome }}" == "success" ]
+ then
+ # activate virtualenv
+ case "$RUNNER_OS" in
+ Linux*|macOS*)
+ source step-security-publish-action-venv/bin/activate;;
+ Windows*)
+ source step-security-publish-action-venv\\Scripts\\activate;;
+ esac
+ fi
+ which python3
+
+ # make sure wheel is installed, which improves installing our dependencies
+ python3 -m pip install $PIP_OPTIONS wheel
+ python3 -m pip install $PIP_OPTIONS -r $GITHUB_ACTION_PATH/../python/requirements.txt
+ echo '##[endgroup]'
+ shell: bash
+
+ - name: Publish Test Results
+ id: test-results
+ run: |
+ echo '##[group]Publish Test Results'
+ # activate virtualenv
+ case "$RUNNER_OS" in
+ Linux*|macOS*)
+ source step-security-publish-action-venv/bin/activate;;
+ Windows*)
+ source step-security-publish-action-venv\\Scripts\\activate;;
+ esac
+ python3 $GITHUB_ACTION_PATH/../python/publish_test_results.py
+ echo '##[endgroup]'
+ env:
+ GITHUB_TOKEN: ${{ inputs.github_token }}
+ GITHUB_TOKEN_ACTOR: ${{ inputs.github_token_actor }}
+ GITHUB_RETRIES: ${{ inputs.github_retries }}
+ COMMIT: ${{ inputs.commit }}
+ CHECK_NAME: ${{ inputs.check_name }}
+ COMMENT_TITLE: ${{ inputs.comment_title }}
+ COMMENT_MODE: ${{ inputs.comment_mode }}
+ FAIL_ON: ${{ inputs.fail_on }}
+ ACTION_FAIL: ${{ inputs.action_fail }}
+ ACTION_FAIL_ON_INCONCLUSIVE: ${{ inputs.action_fail_on_inconclusive }}
+ FILES: ${{ inputs.files }}
+ JUNIT_FILES: ${{ inputs.junit_files }}
+ NUNIT_FILES: ${{ inputs.nunit_files }}
+ XUNIT_FILES: ${{ inputs.xunit_files }}
+ TRX_FILES: ${{ inputs.trx_files }}
+ TIME_UNIT: ${{ inputs.time_unit }}
+ TEST_FILE_PREFIX: ${{ inputs.test_file_prefix }}
+ REPORT_INDIVIDUAL_RUNS: ${{ inputs.report_individual_runs }}
+ REPORT_SUITE_LOGS: ${{ inputs.report_suite_logs }}
+ DEDUPLICATE_CLASSES_BY_FILE_NAME: ${{ inputs.deduplicate_classes_by_file_name }}
+ LARGE_FILES: ${{ inputs.large_files }}
+ IGNORE_RUNS: ${{ inputs.ignore_runs }}
+ COMPARE_TO_EARLIER_COMMIT: ${{ inputs.compare_to_earlier_commit }}
+ PULL_REQUEST_BUILD: ${{ inputs.pull_request_build }}
+ EVENT_FILE: ${{ inputs.event_file }}
+ EVENT_NAME: ${{ inputs.event_name }}
+ TEST_CHANGES_LIMIT: ${{ inputs.test_changes_limit }}
+ CHECK_RUN_ANNOTATIONS: ${{ inputs.check_run_annotations }}
+ CHECK_RUN_ANNOTATIONS_BRANCH: ${{ inputs.check_run_annotations_branch }}
+ SECONDS_BETWEEN_GITHUB_READS: ${{ inputs.seconds_between_github_reads }}
+ SECONDS_BETWEEN_GITHUB_WRITES: ${{ inputs.seconds_between_github_writes }}
+ SECONDARY_RATE_LIMIT_WAIT_SECONDS: ${{ inputs.secondary_rate_limit_wait_seconds }}
+ JSON_FILE: ${{ inputs.json_file }}
+ JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }}
+ JSON_SUITE_DETAILS: ${{ inputs.json_suite_details }}
+ JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }}
+ JOB_SUMMARY: ${{ inputs.job_summary }}
+ SEARCH_PULL_REQUESTS: ${{ inputs.search_pull_requests }}
+ # not documented
+ ROOT_LOG_LEVEL: ${{ inputs.root_log_level }}
+ # not documented
+ LOG_LEVEL: ${{ inputs.log_level }}
+ shell: bash
+
+ - name: Save PIP packages cache
+ uses: actions/cache/save@v3
+ if: ( success() || failure() ) && ! steps.cache.outputs.cache-hit
+ continue-on-error: true
+ with:
+ path: ${{ steps.os.outputs.pip-cache }}
+ key: ${{ steps.cache.outputs.cache-primary-key }}
+
+branding:
+ icon: 'check-square'
+ color: 'green'
diff --git a/misc/action/fetch-workflows/action.yml b/misc/action/fetch-workflows/action.yml
new file mode 100644
index 0000000..0e5151b
--- /dev/null
+++ b/misc/action/fetch-workflows/action.yml
@@ -0,0 +1,32 @@
+name: 'Fetch workflows'
+description: 'A GitHub Action to find workflows matching a query'
+
+outputs:
+ total_workflows:
+ description: 'Total number of workflows using this action'
+ value: ${{ steps.workflows.outputs.total }}
+
+runs:
+ using: 'composite'
+ steps:
+ - name: Fetch workflows
+ id: workflows
+ shell: bash
+ run: |
+ for i in {1..60}
+ do
+ workflows=$(curl -s https://github.com/step-security/publish-unit-test-result-action | (grep "Used by" || true) | sed -e "s/.*title=//" -e 's/["]//g' | cut -d " " -f 1)
+ if [ -n "$workflows" ]
+ then
+ echo "total=$workflows" >> $GITHUB_OUTPUT
+ exit 0
+ fi
+ echo "Attempt $i failed"
+ sleep 60
+ done
+ echo "Giving up"
+ exit 1
+
+branding:
+ icon: 'download-cloud'
+ color: 'green'
diff --git a/misc/action/find-workflows/action.yml b/misc/action/find-workflows/action.yml
new file mode 100644
index 0000000..669ae19
--- /dev/null
+++ b/misc/action/find-workflows/action.yml
@@ -0,0 +1,41 @@
+name: 'Find workflows'
+description: 'A GitHub Action to find workflows matching a query'
+
+inputs:
+ url:
+ description: 'GitHub API URL'
+ required: true
+ query:
+ description: 'Query to find workflows'
+ required: true
+outputs:
+ total_workflows:
+ description: 'Total number of workflows'
+ value: ${{ steps.workflows.outputs.total }}
+
+runs:
+ using: 'composite'
+ steps:
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - name: Install Python dependencies
+ shell: bash
+ run: |
+ python -m pip install --upgrade --force --no-cache-dir pip
+ pip install --force --no-cache-dir -r ${{ github.action_path }}/requirements.txt
+ pip freeze | sort
+
+ - name: Find workflows
+ id: workflows
+ env:
+ GITHUB_TOKEN: ${{ github.token }}
+ shell: bash
+ run: |
+ python ${{ github.action_path }}/script.py ${{ inputs.url }} ${{ inputs.query }}
+
+branding:
+ icon: 'download-cloud'
+ color: 'green'
diff --git a/misc/action/find-workflows/requirements.txt b/misc/action/find-workflows/requirements.txt
new file mode 100644
index 0000000..663bd1f
--- /dev/null
+++ b/misc/action/find-workflows/requirements.txt
@@ -0,0 +1 @@
+requests
\ No newline at end of file
diff --git a/misc/action/find-workflows/script.py b/misc/action/find-workflows/script.py
new file mode 100644
index 0000000..d9b3846
--- /dev/null
+++ b/misc/action/find-workflows/script.py
@@ -0,0 +1,28 @@
+import os
+import sys
+
+import requests
+
+
+if len(sys.argv) != 3:
+ print('Please provide GitHub API URL and the query string')
+ sys.exit(1)
+
+if 'GITHUB_TOKEN' not in os.environ:
+ print('Please provide GitHub token via GITHUB_TOKEN environment variable')
+ sys.exit(1)
+
+url = sys.argv[1]
+query = sys.argv[2]
+
+headers = {'Authorization': f'token {os.environ.get("GITHUB_TOKEN")}'}
+response = requests.get(f'{url}/search/code?q=%22{query}%22+path%3A.github%2Fworkflows%2F+language%3AYAML&type=Code', headers=headers).json()
+
+total = f'{response["total_count"]:,}'
+print(f'found {total} workflows')
+
+if 'GITHUB_OUTPUT' in os.environ:
+ with open(os.environ['GITHUB_OUTPUT'], 'wt') as w:
+ print(f'total={total}', file=w)
+else:
+ print(f'::set-output name=total::{total}')
diff --git a/misc/action/json-output/action.yml b/misc/action/json-output/action.yml
new file mode 100644
index 0000000..e12e588
--- /dev/null
+++ b/misc/action/json-output/action.yml
@@ -0,0 +1,62 @@
+name: 'Assert JSON output'
+description: 'A GitHub Action that asserts the publish action''s JSON output'
+
+inputs:
+ json:
+ description: 'JSON content to assess.'
+ required: true
+ json_file:
+ description: 'Path to the JSON file to assess.'
+ required: true
+
+runs:
+ using: 'composite'
+ steps:
+ - name: JSON file
+ shell: bash
+ run: |
+ jq . "${{ inputs.json_file }}"
+ jq .conclusion "${{ inputs.json_file }}"
+
+ - name: JSON output
+ if: always()
+ env:
+ TITLE: ${{ fromJSON( inputs.json ).title }}
+ SUMMARY: ${{ fromJSON( inputs.json ).summary }}
+ CONCLUSION: ${{ fromJSON( inputs.json ).conclusion }}
+ STATS: ${{ toJSON( fromJSON( inputs.json ).stats ) }}
+ STATS_WITH_DELTA: ${{ toJSON( fromJSON( inputs.json ).stats_with_delta ) }}
+ FORMATTED_STATS: ${{ toJSON( fromJSON( inputs.json ).formatted.stats ) }}
+ FORMATTED_STATS_WITH_DELTA: ${{ toJSON( fromJSON( inputs.json ).formatted.stats_with_delta ) }}
+ COMMIT: ${{ fromJSON( inputs.json ).stats.commit }}
+ REFERENCE: ${{ fromJSON( inputs.json ).stats_with_delta.reference_commit }}
+ ANNOTATIONS: ${{ fromJSON( inputs.json ).annotations }}
+ shell: bash
+ run: |
+ echo "title=$TITLE"
+ echo "summary=$SUMMARY"
+ echo "conclusion=$CONCLUSION"
+ echo "stats=$STATS"
+ echo "stats-with-delta=$STATS_WITH_DELTA"
+ echo "formatted-stats=$FORMATTED_STATS"
+ echo "formatted-stats-with-delta=$FORMATTED_STATS_WITH_DELTA"
+ echo "commit=$COMMIT"
+ echo "reference=$REFERENCE"
+ echo "annotations=$ANNOTATIONS"
+
+ echo
+ echo "JSON output:"
+ cat <
+
+
+def humanize(n):
+ suffix = ''
+ if n > 1000:
+ suffix = 'k'
+ n = n / 1000
+ if n > 1000:
+ suffix = 'M'
+ n = n / 1000
+ if n > 1000:
+ suffix = 'B'
+ n = n / 1000
+ if n > 100:
+ return f'{n:.0f}{suffix}'
+ else:
+ return f'{n:.1f}{suffix}'
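+# Example (illustrative): humanize(1234567) returns '1.2M', humanize(950) returns '950'.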
+
+
+total = humanize(int(total))
+per_day = humanize(int(per_day))
+
+print(f'total={total}')
+print(f'per_day={per_day}')
+
+if 'GITHUB_OUTPUT' in os.environ:
+ print(f'output file is {os.environ["GITHUB_OUTPUT"]}')
+ with open(os.environ['GITHUB_OUTPUT'], 'at') as w:
+ print(f'total={total}', file=w)
+ print(f'per_day={per_day}', file=w)
+else:
+ print(f'::set-output name=total::{total}')
+ print(f'::set-output name=per_day::{per_day}')
diff --git a/misc/badge-arm.svg b/misc/badge-arm.svg
new file mode 100644
index 0000000..2fba611
--- /dev/null
+++ b/misc/badge-arm.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-js.svg b/misc/badge-js.svg
new file mode 100644
index 0000000..c87b6b0
--- /dev/null
+++ b/misc/badge-js.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-license.svg b/misc/badge-license.svg
new file mode 100644
index 0000000..c77ea68
--- /dev/null
+++ b/misc/badge-license.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-macos.svg b/misc/badge-macos.svg
new file mode 100644
index 0000000..8e19a12
--- /dev/null
+++ b/misc/badge-macos.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-trx.svg b/misc/badge-trx.svg
new file mode 100644
index 0000000..010c5b2
--- /dev/null
+++ b/misc/badge-trx.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-ubuntu.svg b/misc/badge-ubuntu.svg
new file mode 100644
index 0000000..258b384
--- /dev/null
+++ b/misc/badge-ubuntu.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-windows.svg b/misc/badge-windows.svg
new file mode 100644
index 0000000..9467f1a
--- /dev/null
+++ b/misc/badge-windows.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/badge-xml.svg b/misc/badge-xml.svg
new file mode 100644
index 0000000..f935934
--- /dev/null
+++ b/misc/badge-xml.svg
@@ -0,0 +1,20 @@
+
\ No newline at end of file
diff --git a/misc/github-checks-annotation.png b/misc/github-checks-annotation.png
new file mode 100644
index 0000000..406be60
Binary files /dev/null and b/misc/github-checks-annotation.png differ
diff --git a/misc/github-checks-comment.png b/misc/github-checks-comment.png
new file mode 100644
index 0000000..5589ba5
Binary files /dev/null and b/misc/github-checks-comment.png differ
diff --git a/misc/github-checks-commit.png b/misc/github-checks-commit.png
new file mode 100644
index 0000000..8e97bf7
Binary files /dev/null and b/misc/github-checks-commit.png differ
diff --git a/misc/github-job-summary-full.png b/misc/github-job-summary-full.png
new file mode 100644
index 0000000..cfbbdca
Binary files /dev/null and b/misc/github-job-summary-full.png differ
diff --git a/misc/github-job-summary.png b/misc/github-job-summary.png
new file mode 100644
index 0000000..e62eb6c
Binary files /dev/null and b/misc/github-job-summary.png differ
diff --git a/misc/github-pull-request-changes-annotation.png b/misc/github-pull-request-changes-annotation.png
new file mode 100644
index 0000000..beb6bc5
Binary files /dev/null and b/misc/github-pull-request-changes-annotation.png differ
diff --git a/misc/github-pull-request-checks.png b/misc/github-pull-request-checks.png
new file mode 100644
index 0000000..cdad4a0
Binary files /dev/null and b/misc/github-pull-request-checks.png differ
diff --git a/misc/github-pull-request-comment-update-history.png b/misc/github-pull-request-comment-update-history.png
new file mode 100644
index 0000000..4c4e5c5
Binary files /dev/null and b/misc/github-pull-request-comment-update-history.png differ
diff --git a/misc/github-pull-request-comment-with-test-changes.png b/misc/github-pull-request-comment-with-test-changes.png
new file mode 100644
index 0000000..1d16671
Binary files /dev/null and b/misc/github-pull-request-comment-with-test-changes.png differ
diff --git a/misc/github-pull-request-comment-without-runs.png b/misc/github-pull-request-comment-without-runs.png
new file mode 100644
index 0000000..1e4bdd1
Binary files /dev/null and b/misc/github-pull-request-comment-without-runs.png differ
diff --git a/misc/github-pull-request-comment.png b/misc/github-pull-request-comment.png
new file mode 100644
index 0000000..2b53288
Binary files /dev/null and b/misc/github-pull-request-comment.png differ
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..56711bb
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+junit_family=xunit1
diff --git a/python/.gitignore b/python/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/python/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/python/publish/__init__.py b/python/publish/__init__.py
new file mode 100644
index 0000000..15a1c6b
--- /dev/null
+++ b/python/publish/__init__.py
@@ -0,0 +1,1014 @@
+import base64
+import gzip
+import json
+import logging
+import re
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import List, Any, Union, Optional, Tuple, Mapping, Iterator, Set, Iterable, Dict
+
+from publish.unittestresults import Numeric, UnitTestSuite, UnitTestCaseResults, UnitTestRunResults, \
+ UnitTestRunDeltaResults, UnitTestRunResultsOrDeltaResults, ParseError
+
+# keep the version in sync with action.yml
+__version__ = 'v1.0.0'
+
+logger = logging.getLogger('publish')
+digest_prefix = '[test-results]:data:'
+digest_mime_type = 'application/gzip'
+digest_encoding = 'base64'
+digest_header = f'{digest_prefix}{digest_mime_type};{digest_encoding},'
+digit_space = '\u2007' # figure space, as wide as a digit
+punctuation_space = '\u2008' # punctuation space, used as thousands separator
+
+comment_mode_off = 'off'
+comment_mode_always = 'always'
+comment_mode_changes = 'changes'
+comment_mode_changes_failures = 'changes in failures' # includes comment_mode_changes_errors
+comment_mode_changes_errors = 'changes in errors'
+comment_mode_failures = 'failures' # includes comment_mode_errors
+comment_mode_errors = 'errors'
+comment_modes = [
+ comment_mode_off,
+ comment_mode_always,
+ comment_mode_changes,
+ comment_mode_changes_failures,
+ comment_mode_changes_errors,
+ comment_mode_failures,
+ comment_mode_errors
+]
+
+fail_on_mode_nothing = 'nothing'
+fail_on_mode_errors = 'errors'
+fail_on_mode_failures = 'test failures'
+fail_on_modes = [
+ fail_on_mode_nothing,
+ fail_on_mode_errors,
+ fail_on_mode_failures
+]
+
+report_suite_out_log = 'info'
+report_suite_err_log = 'error'
+report_suite_logs = 'any'
+report_no_suite_logs = 'none'
+available_report_suite_logs = [report_suite_out_log, report_suite_err_log, report_suite_logs, report_no_suite_logs]
+default_report_suite_logs = report_no_suite_logs
+
+pull_request_build_mode_commit = 'commit'
+pull_request_build_mode_merge = 'merge'
+pull_request_build_modes = [
+ pull_request_build_mode_commit,
+ pull_request_build_mode_merge
+]
+
+all_tests_list = 'all tests'
+skipped_tests_list = 'skipped tests'
+none_annotations = 'none'
+available_annotations = [all_tests_list, skipped_tests_list, none_annotations]
+default_annotations = [all_tests_list, skipped_tests_list]
+
+
+class CaseMessages(defaultdict):
+ def __init__(self, items=None):
+ if items is None:
+ items = []
+ super(CaseMessages, self).__init__(
+ lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))),
+ items
+ )
+
+
+class SomeTestChanges:
+ def __init__(self,
+ all_tests_before: Optional[List[str]],
+ all_tests_current: Optional[List[str]],
+ skipped_tests_before: Optional[List[str]],
+ skipped_tests_current: Optional[List[str]]):
+ self._all_tests_before = set(all_tests_before) if all_tests_before is not None else None
+ self._all_tests_current = set(all_tests_current) if all_tests_current is not None else None
+ self._skipped_tests_before = set(skipped_tests_before) if skipped_tests_before is not None else None
+ self._skipped_tests_current = set(skipped_tests_current) if skipped_tests_current is not None else None
+
+ @property
+ def has_changes(self) -> bool:
+ return (self.adds() is not None and self.removes() is not None and len(self.adds().union(self.removes())) > 0 or
+ self.skips() is not None and self.un_skips() is not None and len(self.skips().union(self.un_skips())) > 0)
+
+ def adds(self) -> Optional[Set[str]]:
+ if self._all_tests_before is None or self._all_tests_current is None:
+ return None
+ return self._all_tests_current - self._all_tests_before
+
+ def removes(self) -> Optional[Set[str]]:
+ if self._all_tests_before is None or self._all_tests_current is None:
+ return None
+ return self._all_tests_before - self._all_tests_current
+
+ def remains(self) -> Optional[Set[str]]:
+ if self._all_tests_before is None or self._all_tests_current is None:
+ return None
+ return self._all_tests_before.intersection(self._all_tests_current)
+
+ def has_no_tests(self) -> bool:
+ return (len(self._all_tests_current) == 0) if self._all_tests_current is not None else False
+
+ def skips(self) -> Optional[Set[str]]:
+ if self._skipped_tests_before is None or self._skipped_tests_current is None:
+ return None
+ return self._skipped_tests_current - self._skipped_tests_before
+
+ def un_skips(self) -> Optional[Set[str]]:
+ if self._skipped_tests_before is None or self._skipped_tests_current is None:
+ return None
+ return self._skipped_tests_before - self._skipped_tests_current
+
+ def added_and_skipped(self) -> Optional[Set[str]]:
+ added = self.adds()
+ skipped = self.skips()
+ if added is None or skipped is None:
+ return None
+ return added.intersection(skipped)
+
+ def remaining_and_skipped(self) -> Optional[Set[str]]:
+ remaining = self.remains()
+ skipped = self.skips()
+ if remaining is None or skipped is None:
+ return None
+ return remaining.intersection(skipped)
+
+ def remaining_and_un_skipped(self) -> Optional[Set[str]]:
+ remaining = self.remains()
+ un_skipped = self.un_skips()
+ if remaining is None or un_skipped is None:
+ return None
+ return remaining.intersection(un_skipped)
+
+ def removed_skips(self) -> Optional[Set[str]]:
+ removed = self.removes()
+ skipped_before = self._skipped_tests_before
+ if removed is None or skipped_before is None:
+ return None
+ return skipped_before.intersection(removed)
+
+
+def get_json_path(json: Dict[str, Any], path: Union[str, List[str]]) -> Any:
+ if isinstance(path, str):
+ path = path.split('.')
+
+ if path[0] not in json:
+ return None
+
+ elem = json[path[0]]
+
+ if len(path) > 1:
+ if isinstance(elem, dict):
+ return get_json_path(elem, path[1:])
+ else:
+ return None
+ else:
+ return elem
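+
+# Example (illustrative): get_json_path({'a': {'b': 1}}, 'a.b') returns 1,
+# get_json_path({'a': {'b': 1}}, 'a.c') returns None; the path can also be
+# given as a list, e.g. ['a', 'b'].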
+
+
+def utf8_character_length(c: int) -> int:
+ if c >= 0x00010000:
+ return 4
+ if c >= 0x00000800:
+ return 3
+ if c >= 0x00000080:
+ return 2
+ return 1
+
+
+# Github API does not like Unicode characters above 0xffff
+# Those characters are replaced here by \U00000000
+def restrict_unicode(text: Optional[str]) -> Optional[str]:
+ if text is None:
+ return None
+ return ''.join([r"\U{:08x}".format(ord(c)) if ord(c) > 0xffff else c
+ for c in text])
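+
+# Example (illustrative): a character above 0xffff, such as U+1F604,
+# is replaced by the ten-character text \U0001f604; characters at or
+# below 0xffff are kept as-is.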
+
+
+def restrict_unicode_list(texts: List[Optional[str]]) -> List[Optional[str]]:
+ return [restrict_unicode(text) for text in texts]
+
+
+def alternating_range(positive_first: bool = True) -> Iterator[int]:
+ i = 0
+ yield i
+
+ if positive_first:
+ while True:
+ i += 1
+ yield i
+ yield -i
+ else:
+ while True:
+ i += 1
+ yield -i
+ yield i
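+
+# Example (illustrative): alternating_range(True) yields 0, 1, -1, 2, -2, …
+# while alternating_range(False) yields 0, -1, 1, -2, 2, …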
+
+
+def abbreviate_bytes(string: Optional[str], length: int) -> Optional[str]:
+ if length < 3:
+ raise ValueError(f'Length must at least allow for the replacement character: {length}')
+
+ if string is None:
+ return None
+
+ char_length = len(string)
+ byte_length = len(string.encode('utf8'))
+ if byte_length <= length:
+ return string
+
+ odd = char_length % 2
+ middle = char_length // 2
+ pre = middle
+ suf = char_length - middle
+ for index in alternating_range(odd == 1):
+ if index >= 0:
+ suf -= 1
+ else:
+ pre -= 1
+ byte_length -= utf8_character_length(ord(string[middle + index]))
+ if byte_length <= length - 3:
+ return string[:pre] + '…' + (string[-suf:] if suf else '')
+
+
+def abbreviate(string: Optional[str], length: int) -> Optional[str]:
+ if length < 1:
+ raise ValueError(f'Length must at least allow for the replacement character: {length}')
+
+ if string is None:
+ return None
+
+ char_length = len(string)
+ if char_length <= length:
+ return string
+
+ pre = length // 2
+ suf = (length - 1) // 2
+ return string[:pre] + '…' + (string[-suf:] if suf else '')
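+
+# Example (illustrative): abbreviate('abcdefgh', 5) returns 'ab…gh',
+# while abbreviate('abc', 5) returns 'abc' unchanged.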
+
+
+def get_formatted_digits(*numbers: Union[Optional[int], Numeric]) -> Tuple[int, int]:
+ def get_abs_number(num):
+ if isinstance(num, dict):
+ return abs(num.get('number')) if num.get('number') is not None else None
+ return abs(num)
+
+ def get_abs_delta(num):
+ if isinstance(num, dict):
+ return abs(num.get('delta')) if num.get('delta') is not None else None
+ return 0
+
+ if isinstance(numbers[0], dict):
+ # only the first number is a dict, other still might be an int
+ number_digits = max([len(as_stat_number(get_abs_number(number))) for number in numbers])
+ delta_digits = max([len(as_stat_number(get_abs_delta(number))) for number in numbers])
+ return number_digits, delta_digits
+
+ return max([len(as_stat_number(abs(number) if number is not None else None))
+ for number in numbers]), 0
+
+
+def get_magnitude(value: Union[int, dict]) -> Optional[int]:
+ if value is None:
+ return None
+ if isinstance(value, int):
+ return value
+ if isinstance(value, dict):
+ if 'number' in value:
+ return value.get('number')
+ if 'duration' in value:
+ return value.get('duration')
+ return None
+
+
+def get_delta(value: Optional[Union[int, Numeric]]) -> Optional[int]:
+ if isinstance(value, int):
+ return None
+ if isinstance(value, Mapping): # Numeric
+ return value.get('delta')
+ return None
+
+
+def as_short_commit(commit: Optional[str]) -> str:
+ return commit[0:8] if commit else None
+
+
+def as_delta(number: int, digits: int) -> str:
+ string = as_stat_number(abs(number), digits)
+ if number == 0:
+ sign = '±'
+ elif number > 0:
+ sign = '+'
+ else:
+ sign = ' - '
+ return f'{sign}{string}'
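+
+# Example (illustrative): as_delta(3, 1) returns '+3', as_delta(-3, 1)
+# returns ' - 3', and as_delta(0, 1) returns '±0'.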
+
+
+def as_stat_number(number: Optional[Union[int, Numeric]],
+ number_digits: int = 0,
+ delta_digits: int = 0,
+ label: Optional[str] = None) -> str:
+ if number is None:
+ if label:
+ return 'N/A {}'.format(label)
+ return 'N/A'
+ if isinstance(number, int):
+ formatted = '{number:0{digits},}'.format(number=number, digits=number_digits)
+ res = re.search('[^0,]', formatted)
+ pos = res.start() if res else len(formatted)-1
+ formatted = '{}{}'.format(formatted[:pos].replace('0', digit_space), formatted[pos:])
+ formatted = formatted.replace(',', punctuation_space)
+ if label:
+ return '{} {}'.format(formatted, label)
+ return formatted
+ elif isinstance(number, dict):
+ extra_fields = [
+ as_delta(number['delta'], delta_digits) if 'delta' in number else '',
+ as_stat_number(number['new'], 0, 0, 'new') if 'new' in number else '',
+ as_stat_number(number['gone'], 0, 0, 'gone') if 'gone' in number else '',
+ ]
+ extra = ', '.join([field for field in extra_fields if field != ''])
+
+ return ''.join([
+ as_stat_number(number.get('number'), number_digits, delta_digits, label),
+ f' {extra} ' if extra != '' else ''
+ ])
+ else:
+ logger.warning(f'unsupported stats number type {type(number)}: {number}')
+ return 'N/A'
+
+
+def as_stat_duration(duration: Optional[Union[float, int, Numeric]], label=None) -> str:
+ if duration is None:
+ if label:
+ return f'N/A {label}'
+ return 'N/A'
+ if isinstance(duration, float):
+ duration = int(duration)
+ if isinstance(duration, int):
+ duration = abs(duration)
+ strings = []
+ for unit, denominator in [('s', 60), ('m', 60), ('h', 24)]:
+ if unit == 's' or duration:
+ strings.insert(0, f'{duration % denominator}{unit}')
+ duration //= denominator
+ if duration:
+ strings.insert(0, f'{duration}d')
+ string = ' '.join(strings)
+ if label:
+ return f'{string} {label}'
+ return string
+ elif isinstance(duration, dict):
+ delta = duration.get('delta')
+ duration = duration.get('duration')
+ sign = '' if delta is None else '±' if delta == 0 else '+' if delta > 0 else '-'
+ if delta and abs(delta) >= 60:
+ sign += ' '
+ return as_stat_duration(duration, label) + (f' {sign}{as_stat_duration(delta)}' if delta is not None else '')
+ else:
+ logger.warning(f'unsupported stats duration type {type(duration)}: {duration}')
+ return 'N/A'
+
+
+def digest_string(string: str) -> str:
+ return str(base64.encodebytes(gzip.compress(bytes(string, 'utf8'), compresslevel=9)), 'utf8') \
+ .replace('\n', '')
+
+
+def ungest_string(string: str) -> str:
+ return str(gzip.decompress(base64.decodebytes(bytes(string, 'utf8'))), 'utf8')
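+
+# digest_string and ungest_string are inverses of each other:
+# ungest_string(digest_string(s)) == s holds for any string s.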
+
+
+def get_digest_from_stats(stats: UnitTestRunResults) -> str:
+ d = stats.to_dict()
+ del d['errors'] # we don't need errors in the digest
+ return digest_string(json.dumps(d, ensure_ascii=False))
+
+
+def get_stats_from_digest(digest: str) -> UnitTestRunResults:
+ return UnitTestRunResults.from_dict(json.loads(ungest_string(digest)))
+
+
+def get_short_summary(stats: UnitTestRunResults) -> str:
+ """Provides a single-line summary for the given stats."""
+ perrors = len(stats.errors)
+ tests = get_magnitude(stats.tests)
+ success = get_magnitude(stats.tests_succ)
+ skipped = get_magnitude(stats.tests_skip)
+ failure = get_magnitude(stats.tests_fail)
+ error = get_magnitude(stats.tests_error)
+ duration = get_magnitude(stats.duration)
+
+ def get_test_summary():
+ if tests == 0:
+ if perrors == 0:
+ return 'No tests found'
+ else:
+ return f'{perrors} parse errors'
+ if tests > 0:
+ if (failure is None or failure == 0) and \
+ (error is None or error == 0) and perrors == 0:
+ if skipped == 0 and success == tests:
+ return 'All {} pass'.format(as_stat_number(tests, 0, 0, 'tests'))
+ if skipped > 0 and success == tests - skipped:
+ return 'All {} pass, {}'.format(
+ as_stat_number(success, 0, 0, 'tests'),
+ as_stat_number(skipped, 0, 0, 'skipped')
+ )
+
+ summary = [as_stat_number(number, 0, 0, label)
+ for number, label in [(perrors, 'parse errors'),
+ (error, 'errors'), (failure, 'fail'),
+ (skipped, 'skipped'), (success, 'pass')]
+ if number > 0]
+ summary = ', '.join(summary)
+
+ # when all except tests are None or 0
+ if len(summary) == 0:
+ return f'{as_stat_number(tests, 0, 0, "tests")} found'
+ return summary
+
+ if tests is None or tests == 0 or duration is None:
+ return get_test_summary()
+
+ return f'{get_test_summary()} in {as_stat_duration(duration)}'
+
+
+def get_link_and_tooltip_label_md(label: str, tooltip: str) -> str:
+ return '[{label}]({link} "{tooltip}")'.format(
+ label=label,
+ link=f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#the-symbols',
+ tooltip=tooltip
+ )
+
+
+all_tests_label_md = 'tests'
+passed_tests_label_md = get_link_and_tooltip_label_md(':heavy_check_mark:', 'passed tests')
+skipped_tests_label_md = get_link_and_tooltip_label_md(':zzz:', 'skipped / disabled tests')
+failed_tests_label_md = get_link_and_tooltip_label_md(':x:', 'failed tests')
+test_errors_label_md = get_link_and_tooltip_label_md(':fire:', 'test errors')
+duration_label_md = get_link_and_tooltip_label_md(':stopwatch:', 'duration of all tests')
+
+
+def get_short_summary_md(stats: UnitTestRunResultsOrDeltaResults) -> str:
+ """Provides a single-line summary with markdown for the given stats."""
+ md = ('{tests} {tests_succ} {tests_skip} {tests_fail} {tests_error}'.format(
+ tests=as_stat_number(stats.tests, 0, 0, all_tests_label_md),
+ tests_succ=as_stat_number(stats.tests_succ, 0, 0, passed_tests_label_md),
+ tests_skip=as_stat_number(stats.tests_skip, 0, 0, skipped_tests_label_md),
+ tests_fail=as_stat_number(stats.tests_fail, 0, 0, failed_tests_label_md),
+ tests_error=as_stat_number(stats.tests_error, 0, 0, test_errors_label_md),
+ ))
+ return md
+
+
+def get_test_changes_summary_md(changes: Optional[SomeTestChanges], list_limit: Optional[int]) -> str:
+ if not changes or list_limit == 0 or changes.has_no_tests():
+ return ''
+
+ test_changes_details = []
+ if changes.removes():
+ if changes.adds():
+ test_changes_details.append(
+ get_test_changes_md(
+ 'This pull request removes {} and adds {} tests. '
+ 'Note that renamed tests count towards both.'.format(
+ len(changes.removes()),
+ len(changes.adds()),
+ ),
+ list_limit,
+ changes.removes(),
+ changes.adds()
+ )
+ )
+ else:
+ test_changes_details.append(
+ get_test_changes_md(
+ 'This pull request removes {} test{}.'.format(
+ len(changes.removes()),
+ 's' if len(changes.removes()) > 1 else ''
+ ),
+ list_limit,
+ list(changes.removes())
+ )
+ )
+
+ if changes.removed_skips() and changes.added_and_skipped():
+ test_changes_details.append(
+ get_test_changes_md(
+ 'This pull request removes {} skipped test{} and adds {} skipped test{}. '
+ 'Note that renamed tests count towards both.'.format(
+ len(changes.removed_skips()),
+ 's' if len(changes.removed_skips()) > 1 else '',
+ len(changes.added_and_skipped()),
+ 's' if len(changes.added_and_skipped()) > 1 else ''
+ ),
+ list_limit,
+ changes.removed_skips(),
+ changes.added_and_skipped()
+ )
+ )
+
+ if changes.remaining_and_skipped():
+ if changes.remaining_and_un_skipped():
+ test_changes_details.append(
+ get_test_changes_md(
+ 'This pull request skips {} and un-skips {} tests.'.format(
+ len(changes.remaining_and_skipped()),
+ len(changes.remaining_and_un_skipped())
+ ),
+ list_limit,
+ changes.remaining_and_skipped(),
+ changes.remaining_and_un_skipped()
+ )
+ )
+ else:
+ test_changes_details.append(
+ get_test_changes_md(
+ 'This pull request skips {} test{}.'.format(
+ len(changes.remaining_and_skipped()),
+ 's' if len(changes.remaining_and_skipped()) > 1 else ''
+ ),
+ list_limit,
+ changes.remaining_and_skipped()
+ )
+ )
+
+ return '\n'.join(test_changes_details)
+
+
+def get_test_changes_md(summary: str, list_limit: Optional[int], *tests: Iterable[str]) -> str:
+ tests = '\n'.join([get_test_changes_list_md(sorted(test), list_limit) for test in tests])
+ return (
+ f'<details>\n'
+ f'  <summary>{summary}</summary>\n'
+ f'\n'
+ f'{tests}'
+ f'</details>\n'
+ )
+
+
+def get_test_changes_list_md(tests: List[str], limit: Optional[int]) -> str:
+ if limit:
+ tests = tests[:limit] + (['…'] if len(tests) > limit else [])
+ tests = '\n'.join(tests)
+ return f'```\n{tests}\n```\n'
+
+
+def get_long_summary_md(stats: UnitTestRunResultsOrDeltaResults,
+ details_url: Optional[str] = None,
+ test_changes: Optional[SomeTestChanges] = None,
+ test_list_changes_limit: Optional[int] = None) -> str:
+ """Provides a long summary in Markdown notation for the given stats."""
+ trivial_runs = stats.runs == stats.tests and \
+ stats.runs_succ == stats.tests_succ and \
+ stats.runs_skip == stats.tests_skip and \
+ stats.runs_fail == stats.tests_fail and \
+ stats.runs_error == stats.tests_error
+
+ if trivial_runs:
+ return get_long_summary_without_runs_md(stats, details_url, test_changes, test_list_changes_limit)
+ else:
+ return get_long_summary_with_runs_md(stats, details_url, test_changes, test_list_changes_limit)
+
+
+def get_details_line_md(stats: UnitTestRunResultsOrDeltaResults,
+ details_url: Optional[str] = None) -> str:
+ errors = len(stats.errors)
+ details_on = (['parsing errors'] if errors > 0 else []) + \
+ (['failures'] if get_magnitude(stats.tests_fail) > 0 else []) + \
+ (['errors'] if get_magnitude(stats.tests_error) > 0 else [])
+ details_on = details_on[0:-2] + [' and '.join(details_on[-2:])] if details_on else []
+
+ return 'For more details on these {details_on}, see [this check]({url}).'.format(
+ details_on=', '.join(details_on),
+ url=details_url
+ ) if details_url and details_on else ''
+
+
+def get_commit_line_md(stats: UnitTestRunResultsOrDeltaResults) -> str:
+ commit = stats.commit
+ is_delta_stats = isinstance(stats, UnitTestRunDeltaResults)
+ reference_type = stats.reference_type if is_delta_stats else None
+ reference_commit = stats.reference_commit if is_delta_stats else None
+
+ return 'Results for commit {commit}.{compare}'.format(
+ commit=as_short_commit(commit),
+ compare=' ± Comparison against {reference_type} commit {reference_commit}.'.format(
+ reference_type=reference_type,
+ reference_commit=as_short_commit(reference_commit)
+ ) if reference_type and reference_commit else ''
+ )
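+ # For illustration (hashes made up), this renders lines like
+ # 'Results for commit 12345678. ± Comparison against earlier commit 9abcdef0.',
+ # assuming as_short_commit shortens the sha to a few leading characters.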
+
+
+def get_long_summary_with_runs_md(stats: UnitTestRunResultsOrDeltaResults,
+ details_url: Optional[str] = None,
+ test_changes: Optional[SomeTestChanges] = None,
+ test_list_changes_limit: Optional[int] = None) -> str:
+ files_digits, files_delta_digits = get_formatted_digits(stats.files, stats.tests, stats.runs)
+ success_digits, success_delta_digits = get_formatted_digits(stats.suites, stats.tests_succ, stats.runs_succ)
+ skip_digits, skip_delta_digits = get_formatted_digits(stats.tests_skip, stats.runs_skip)
+ fail_digits, fail_delta_digits = get_formatted_digits(stats.tests_fail, stats.runs_fail)
+ error_digits, error_delta_digits = get_formatted_digits(stats.tests_error, stats.runs_error)
+
+ errors = len(stats.errors)
+ misc_line = '{files} {errors}{suites} {duration}\n'.format(
+ files=as_stat_number(stats.files, files_digits, files_delta_digits, 'files '),
+ errors='{} '.format(as_stat_number(errors, success_digits, 0, 'errors ')) if errors > 0 else '',
+ suites=as_stat_number(stats.suites, success_digits if errors == 0 else skip_digits, 0, 'suites '),
+ duration=as_stat_duration(stats.duration, duration_label_md)
+ )
+
+ tests_error_part = ' {tests_error}'.format(
+ tests_error=as_stat_number(stats.tests_error, error_digits, error_delta_digits, test_errors_label_md)
+ ) if get_magnitude(stats.tests_error) else ''
+ tests_line = '{tests} {tests_succ} {tests_skip} {tests_fail}{tests_error_part}\n'.format(
+ tests=as_stat_number(stats.tests, files_digits, files_delta_digits, all_tests_label_md),
+ tests_succ=as_stat_number(stats.tests_succ, success_digits, success_delta_digits, passed_tests_label_md),
+ tests_skip=as_stat_number(stats.tests_skip, skip_digits, skip_delta_digits, skipped_tests_label_md),
+ tests_fail=as_stat_number(stats.tests_fail, fail_digits, fail_delta_digits, failed_tests_label_md),
+ tests_error_part=tests_error_part
+ )
+
+ runs_error_part = ' {runs_error}'.format(
+ runs_error=as_stat_number(stats.runs_error, error_digits, error_delta_digits, test_errors_label_md)
+ ) if get_magnitude(stats.runs_error) else ''
+ runs_line = '{runs} {runs_succ} {runs_skip} {runs_fail}{runs_error_part}\n'.format(
+ runs=as_stat_number(stats.runs, files_digits, files_delta_digits, 'runs '),
+ runs_succ=as_stat_number(stats.runs_succ, success_digits, success_delta_digits, passed_tests_label_md),
+ runs_skip=as_stat_number(stats.runs_skip, skip_digits, skip_delta_digits, skipped_tests_label_md),
+ runs_fail=as_stat_number(stats.runs_fail, fail_digits, fail_delta_digits, failed_tests_label_md),
+ runs_error_part=runs_error_part,
+ )
+
+ details_line = get_details_line_md(stats, details_url)
+ commit_line = get_commit_line_md(stats)
+ test_changes_details = get_test_changes_summary_md(test_changes, test_list_changes_limit)
+
+ return '{misc}{tests}{runs}{details}{commit}{test_changes_details}'.format(
+ misc=misc_line,
+ tests=tests_line,
+ runs=runs_line,
+ details=new_lines(details_line),
+ commit=new_lines(commit_line),
+ test_changes_details=new_line(test_changes_details)
+ )
+
+
+def new_line(text: str, before: bool = True) -> str:
+ if before:
+ return ('\n' + text) if text else text
+ else:
+ return (text + '\n') if text else text
+
+
+def new_lines(text: str) -> str:
+ return ('\n' + text + '\n') if text else text
+
+
+def get_long_summary_without_runs_md(stats: UnitTestRunResultsOrDeltaResults,
+ details_url: Optional[str] = None,
+ test_changes: Optional[SomeTestChanges] = None,
+ test_list_changes_limit: Optional[int] = None) -> str:
+ sep = ' '
+
+ errors = len(stats.errors)
+ tests_digits, tests_delta_digits = get_formatted_digits(stats.tests, stats.suites, stats.files, errors)
+ passs_digits, passs_delta_digits = get_formatted_digits(stats.tests_succ, stats.tests_skip, stats.tests_fail, stats.tests_error)
+
+ tests = as_stat_number(stats.tests, tests_digits, tests_delta_digits, all_tests_label_md + ' ')
+ suites = as_stat_number(stats.suites, tests_digits, tests_delta_digits, 'suites')
+ files = as_stat_number(stats.files, tests_digits, tests_delta_digits, 'files ')
+ parse_errors = as_stat_number(errors, tests_digits, tests_delta_digits, 'errors') if errors else ''
+
+ passs = as_stat_number(stats.tests_succ, passs_digits, passs_delta_digits, passed_tests_label_md)
+ skips = as_stat_number(stats.tests_skip, passs_digits, passs_delta_digits, skipped_tests_label_md)
+ fails = as_stat_number(stats.tests_fail, passs_digits, passs_delta_digits, failed_tests_label_md)
+
+ duration = as_stat_duration(stats.duration, duration_label_md)
+ errors = sep + as_stat_number(stats.tests_error, label=test_errors_label_md) if get_magnitude(stats.tests_error) else ''
+
+ details_line = get_details_line_md(stats, details_url)
+ commit_line = get_commit_line_md(stats)
+ test_changes_details = get_test_changes_summary_md(test_changes, test_list_changes_limit)
+
+ return '{tests}{sep}{passs}{sep}{duration}\n' \
+ '{suites}{sep}{skips}\n' \
+ '{files}{sep}{fails}{errors}\n' \
+ '{parse_errors}{details}{commit}{test_changes_details}'.format(
+ sep=sep,
+ tests=tests,
+ passs=passs,
+ duration=duration,
+ suites=suites,
+ skips=skips,
+ files=files,
+ fails=fails,
+ errors=errors,
+ parse_errors=new_line(parse_errors, before=False),
+ details=new_lines(details_line),
+ commit=new_lines(commit_line),
+ test_changes_details=new_line(test_changes_details)
+ )
+
+
+def get_long_summary_with_digest_md(stats: UnitTestRunResultsOrDeltaResults,
+ digest_stats: Optional[UnitTestRunResults] = None,
+ details_url: Optional[str] = None,
+ test_changes: Optional[SomeTestChanges] = None,
+ test_list_changes_limit: Optional[int] = None) -> str:
+ """
+ Provides the summary of stats with the digest of digest_stats if given, otherwise
+ the digest of stats. In the latter case, stats must be UnitTestRunResults.
+
+ :param stats: stats to summarize
+ :param digest_stats: stats to digest
+ :return: summary with digest
+ """
+ if digest_stats is None and isinstance(stats, UnitTestRunDeltaResults):
+ raise ValueError('stats must be UnitTestRunResults when no digest_stats is given')
+ summary = get_long_summary_md(stats, details_url, test_changes, test_list_changes_limit)
+ digest = get_digest_from_stats(stats if digest_stats is None else digest_stats)
+ return f'{summary}\n{digest_header}{digest}\n'
+
+
+def get_case_messages(case_results: UnitTestCaseResults) -> CaseMessages:
+ """ Re-index cases from test+state to test+state+message. """
+ messages = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
+ for test in case_results:
+ for state in case_results[test]:
+ for case in case_results[test][state]:
+ message = case.message if case.result in ['skipped', 'disabled'] else case.content
+ messages[test][state][message].append(case)
+ return CaseMessages(messages)
+
+
+@dataclass(frozen=True)
+class Annotation:
+ path: str
+ start_line: int
+ end_line: int
+ start_column: Optional[int]
+ end_column: Optional[int]
+ annotation_level: str
+ message: str
+ title: Optional[str]
+ raw_details: Optional[str]
+
+ def to_dict(self) -> Mapping[str, Any]:
+ dictionary = self.__dict__.copy()
+ dictionary['path'] = restrict_unicode(dictionary['path'])
+ dictionary['message'] = abbreviate_bytes(restrict_unicode(dictionary['message']), 64000)
+ dictionary['title'] = abbreviate(restrict_unicode(dictionary['title']), 255)
+ dictionary['raw_details'] = abbreviate(restrict_unicode(dictionary['raw_details']), 64000)
+ if not dictionary.get('start_column'):
+ del dictionary['start_column']
+ if not dictionary.get('end_column'):
+ del dictionary['end_column']
+ if not dictionary.get('title'):
+ del dictionary['title']
+ if not dictionary.get('raw_details'):
+ del dictionary['raw_details']
+ return dictionary
+
+
+def message_is_contained_in_content(message: Optional[str], content: Optional[str]) -> bool:
+ # ignore new lines and any leading or trailing white spaces
+ if content and message:
+ content = re.sub(r'\s+', ' ', content.strip())
+ message = re.sub(r'\s+', ' ', message.strip())
+ return content.startswith(message)
+ return False
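+# For example, message_is_contained_in_content('AssertionError: 1 != 2',
+# 'AssertionError: 1 != 2\n at test_foo') returns True, since the content
+# repeats the message after whitespace normalisation.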
+
+
+def get_case_annotation(messages: CaseMessages,
+ key: Tuple[Optional[str], Optional[str], Optional[str]],
+ state: str,
+ message: Optional[str],
+ report_individual_runs: bool) -> Annotation:
+ case = messages[key][state][message][0]
+ same_cases = len(messages[key][state][message] if report_individual_runs else
+ [case
+ for m in messages[key][state]
+ for case in messages[key][state][m]])
+ all_cases = len([case
+ for s in messages[key]
+ for m in messages[key][s]
+ for case in messages[key][s][m]])
+ same_result_files = {case.result_file: case.time
+ for case in (messages[key][state][message] if report_individual_runs else
+ [c
+ for m in messages[key][state]
+ for c in messages[key][state][m]])
+ if case.result_file}
+ test_file = case.test_file
+ line = case.line or 0
+ test_name = case.test_name if case.test_name else 'Unknown test'
+ class_name = case.class_name
+ title = test_name if not class_name else f'{test_name} ({class_name})'
+ title_state = \
+ 'pass' if state == 'success' else \
+ 'failed' if state == 'failure' else \
+ 'with error' if state == 'error' else \
+ 'skipped'
+ if all_cases > 1:
+ if same_cases == all_cases:
+ title = f'All {all_cases} runs {title_state}: {title}'
+ else:
+ title = f'{same_cases} out of {all_cases} runs {title_state}: {title}'
+ else:
+ title = f'{title} {title_state}'
+
+ level = (
+ 'warning' if case.result == 'failure' else
+ 'failure' if case.result == 'error' else # failure is used for test errors
+ 'notice'
+ )
+
+ # pick details from message and content, but try to avoid redundancy (e.g. when content repeats message)
+ # always add stdout and stderr if they are not empty
+ maybe_message = [case.message] if not message_is_contained_in_content(case.message, case.content) else []
+ details = [detail.rstrip()
+ for detail in maybe_message + [case.content, case.stdout, case.stderr]
+ if detail and detail.rstrip()]
+
+ return Annotation(
+ path=test_file or class_name or '/',
+ start_line=line,
+ end_line=line,
+ start_column=None,
+ end_column=None,
+ annotation_level=level,
+ message='\n'.join([file if time is None else f'{file} [took {as_stat_duration(time)}]'
+ for file, time in sorted(same_result_files.items())]),
+ title=title,
+ raw_details='\n'.join(details) if details else None
+ )
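+# For illustration (names made up): a test failing in 3 of 5 runs yields an
+# annotation titled '3 out of 5 runs failed: test_foo (MyClass)', while a
+# single-run failure is titled 'test_foo (MyClass) failed'.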
+
+
+def get_case_annotations(case_results: UnitTestCaseResults,
+ report_individual_runs: bool) -> List[Annotation]:
+ messages = get_case_messages(case_results)
+ return [
+ get_case_annotation(messages, key, state, message, report_individual_runs)
+ for key in messages
+ for state in messages[key] if state not in ['success', 'skipped']
+ for message in (messages[key][state] if report_individual_runs else
+ [list(messages[key][state].keys())[0]])
+ ]
+
+
+def get_error_annotation(error: ParseError) -> Annotation:
+ return Annotation(
+ path=error.file,
+ start_line=error.line or 0,
+ end_line=error.line or 0,
+ start_column=error.column,
+ end_column=error.column,
+ annotation_level='failure',
+ message=error.message,
+ title='Error processing result file',
+ raw_details=error.file
+ )
+
+
+def get_error_annotations(parse_errors: List[ParseError]) -> List[Annotation]:
+ return [get_error_annotation(error) for error in parse_errors]
+
+
+def get_suite_annotations_for_suite(suite: UnitTestSuite, with_suite_out_logs: bool, with_suite_err_logs: bool) -> List[Annotation]:
+ return [
+ Annotation(
+ path=suite.name,
+ start_line=0,
+ end_line=0,
+ start_column=None,
+ end_column=None,
+ annotation_level='warning' if source == 'stderr' else 'notice',
+ message=f'Test suite {suite.name} has the following {source} output (see Raw output).',
+ title=f'Logging on {source} of test suite {suite.name}',
+ raw_details=details
+ )
+ for details, source in ([(suite.stdout, 'stdout')] if with_suite_out_logs else []) +
+ ([(suite.stderr, 'stderr')] if with_suite_err_logs else [])
+ if details and details.strip()
+ ]
+
+
+def get_suite_annotations(suites: List[UnitTestSuite], with_suite_out_logs: bool, with_suite_err_logs: bool) -> List[Annotation]:
+ return [annotation
+ for suite in suites
+ for annotation in get_suite_annotations_for_suite(suite, with_suite_out_logs, with_suite_err_logs)]
+
+
+def get_test_name(file_name: Optional[str],
+ class_name: Optional[str],
+ test_name: Optional[str]) -> str:
+ if not test_name:
+ test_name = 'Unknown test'
+
+ name = []
+ token = ' ‑ ' # U+2011 non-breaking hyphen
+ for part in [file_name, class_name, test_name]:
+ if part:
+ name.append(part.replace(token, ' ‐ ')) # U+2010 breaking hyphen
+
+ return token.join(name)
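+# For illustration, get_test_name('test.py', 'MyClass', 'test_foo') returns
+# 'test.py ‑ MyClass ‑ test_foo', joined with the U+2011 token above so the
+# parts of a test name stay on one line when rendered.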
+
+
+def get_all_tests_list(cases: UnitTestCaseResults) -> List[str]:
+ if not cases:
+ return []
+ return [get_test_name(file_name, class_name, test_name)
+ for (file_name, class_name, test_name) in cases.keys()]
+
+
+def get_skipped_tests_list(cases: UnitTestCaseResults) -> List[str]:
+ if not cases:
+ return []
+ return [get_test_name(file_name, class_name, test_name)
+ for (file_name, class_name, test_name), result in cases.items()
+ if 'skipped' in result and len(result) == 1]
+
+
+def get_all_tests_list_annotation(cases: UnitTestCaseResults, max_chunk_size: int = 64000) -> List[Annotation]:
+ return get_test_list_annotation(restrict_unicode_list(get_all_tests_list(cases)), 'test', max_chunk_size)
+
+
+def get_skipped_tests_list_annotation(cases: UnitTestCaseResults, max_chunk_size: int = 64000) -> List[Annotation]:
+ return get_test_list_annotation(restrict_unicode_list(get_skipped_tests_list(cases)), 'skipped test', max_chunk_size)
+
+
+def get_test_list_annotation(tests: List[str], label: str, max_chunk_size: int = 64000) -> List[Annotation]:
+ if len(tests) == 0:
+ return []
+
+ # the max_chunk_size must not be larger than the abbreviate_bytes limit in Annotation.to_dict
+ test_chunks = chunk_test_list(sorted(tests), '\n', max_chunk_size)
+
+ if len(test_chunks) == 1:
+ if len(tests) == 1:
+ title = f'{len(tests)} {label} found'
+ message = f'There is 1 {label}, see "Raw output" for the name of the {label}.'
+ else:
+ title = f'{len(tests)} {label}s found'
+ message = f'There are {len(tests)} {label}s, see "Raw output" for the full list of {label}s.'
+
+ return [create_tests_list_annotation(title=title, message=message, raw_details='\n'.join(test_chunks[0]))]
+
+ first = 1
+ annotations = []
+ for chunk in test_chunks:
+ last = first + len(chunk) - 1
+ title = f'{len(tests)} {label}s found (test {first} to {last})'
+ message = f'There are {len(tests)} {label}s, see "Raw output" for the list of {label}s {first} to {last}.'
+ annotation = create_tests_list_annotation(title=title, message=message, raw_details='\n'.join(chunk))
+ annotations.append(annotation)
+ first = last + 1
+
+ return annotations
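+# For illustration: 3 tests that fit into one chunk yield a single annotation
+# titled '3 tests found'; if they spanned two chunks, the annotations would be
+# titled '3 tests found (test 1 to 2)' and '3 tests found (test 3 to 3)'.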
+
+
+def chunk_test_list(tests: List[str], delimiter: str, max_chunk_size: int) -> List[List[str]]:
+ if not tests:
+ return []
+
+ sizes = [len(f'{test}{delimiter}'.encode('utf8')) for test in tests]
+ if sum(sizes) <= max_chunk_size:
+ return [tests]
+
+ if any(size > max_chunk_size for size in sizes):
+ logger.warning(f'Dropping all test names because some names are longer '
+ f'than max_chunk_size of {max_chunk_size} bytes')
+ return []
+
+ chunks = []
+ while tests:
+ size = 0
+ length = 0
+ while length < len(tests) and size + sizes[length] < max_chunk_size:
+ size = size + sizes[length]
+ length = length + 1
+
+ chunks.append(tests[:length])
+ tests = tests[length:]
+ sizes = sizes[length:]
+
+ return chunks
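+# A worked example, assuming max_chunk_size=16: each name below costs
+# len(name)+1 bytes including the '\n' delimiter, so
+# chunk_test_list(['aaaa', 'bbbb', 'cccc', 'dddd'], '\n', 16)
+# returns [['aaaa', 'bbbb', 'cccc'], ['dddd']].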
+
+
+def create_tests_list_annotation(title: str, message: str, raw_details: Optional[str]) -> Annotation:
+ return Annotation(
+ path='.github',
+ start_line=0,
+ end_line=0,
+ start_column=None,
+ end_column=None,
+ annotation_level='notice',
+ message=message,
+ title=title,
+ raw_details=raw_details
+ )
diff --git a/python/publish/dart.py b/python/publish/dart.py
new file mode 100644
index 0000000..d93d67b
--- /dev/null
+++ b/python/publish/dart.py
@@ -0,0 +1,126 @@
+import json
+from collections import defaultdict
+from typing import Dict, Any, List
+
+from junitparser.junitparser import etree
+
+from publish.junit import JUnitTree
+
+
+def is_dart_json(path: str) -> bool:
+ if not path.endswith('.json'):
+ return False
+
+ try:
+ with open(path, 'rt') as r:
+ line = r.readline()
+ event = json.loads(line)
+ # {"protocolVersion":"0.1.1","runnerVersion":"1.23.1","pid":1705,"type":"start","time":0}
+ return event.get('type') == 'start' and 'protocolVersion' in event
+ except BaseException:
+ return False
+
+
+def parse_dart_json_file(path: str) -> JUnitTree:
+ tests: Dict[int, Dict[Any, Any]] = defaultdict(lambda: dict())
+ suites: Dict[int, Dict[Any, Any]] = defaultdict(lambda: dict())
+ suite_tests: Dict[int, List[Any]] = defaultdict(lambda: list())
+ suite_start = None
+ suite_time = None
+
+ with open(path, 'rt') as r:
+ for line in r:
+ # https://github.com/dart-lang/test/blob/master/pkgs/test/doc/json_reporter.md
+ event = json.loads(line)
+ type = event.get('type')
+
+ if type == 'start':
+ suite_start = event.get('time')
+ elif type == 'suite' and 'suite' in event and 'id' in event['suite']:
+ suite = event['suite']
+ id = suite['id']
+ suites[id]['path'] = suite.get('path')
+ suites[id]['start'] = event.get('time')
+ elif type == 'testStart' and 'test' in event and 'id' in event['test']:
+ test = event['test']
+ id = test['id']
+ tests[id]['name'] = test.get('name')
+ tests[id]['suite'] = test.get('suiteID')
+ tests[id]['line'] = test.get('line') # 1-based
+ tests[id]['column'] = test.get('column') # 1-based
+ tests[id]['url'] = test.get('url')
+ tests[id]['start'] = event.get('time')
+ if test.get('suiteID') is not None:
+ suite_tests[test.get('suiteID')].append(tests[id])
+ elif type == 'testDone' and 'testID' in event:
+ id = event['testID']
+ tests[id]['result'] = event.get('result')
+ tests[id]['hidden'] = event.get('hidden')
+ tests[id]['skipped'] = event.get('skipped')
+ tests[id]['end'] = event.get('time')
+ elif type == 'error' and 'testID' in event:
+ id = event['testID']
+ tests[id]['error'] = event.get('error')
+ tests[id]['stackTrace'] = event.get('stackTrace')
+ tests[id]['isFailure'] = event.get('isFailure')
+ elif type == 'print' and 'testID' in event and event.get('messageType') == 'skip':
+ id = event['testID']
+ tests[id]['reason'] = event.get('message')
+ elif type == 'done':
+ suite_time = event.get('time')
+
+ def create_test(test):
+ testcase = etree.Element('testcase', attrib={k: str(v) for k, v in dict(
+ name=test.get('name'),
+ file=test.get('url'),
+ line=test.get('line'),
+ time=(test['end'] - test['start']) / 1000.0 if test.get('start') is not None and test.get('end') is not None else None,
+ ).items() if isinstance(v, str) and v or v is not None})
+
+ test_result = test.get('result', 'error')
+ if test_result != 'success':
+ result = etree.Element('error' if test_result != 'failure' else test_result, attrib={k: v for k, v in dict(
+ message=test.get('error')
+ ).items() if v})
+ result.text = etree.CDATA('\n'.join(text
+ for text in [test.get('error'), test.get('stackTrace')]
+ if text))
+ testcase.append(result)
+ elif test.get('skipped', False):
+ result = etree.Element('skipped', attrib={k: v for k, v in dict(
+ message=test.get('reason')
+ ).items() if v})
+ testcase.append(result)
+
+ return testcase
+
+ def create_suite(suite, tests):
+ testsuite = etree.Element('testsuite', attrib={k: str(v) for k, v in dict(
+ name=suite.get('path'),
+ time=(suite['end'] - suite['start']) / 1000.0 if suite.get('start') is not None and suite.get('end') is not None else None,
+ tests=str(len(tests)),
+ failures=str(len([test for test in tests if test.get('isFailure', False)])),
+ errors=str(len([test for test in tests if not test.get('isFailure', True)])),
+ skipped=str(len([test for test in tests if test.get('skipped', False)])),
+ ).items() if isinstance(v, str) and v or v is not None})
+
+ testsuite.extend(create_test(test) for test in tests)
+
+ return testsuite
+
+ # do not count hidden tests (unless not successful)
+ visible_tests = [test for test in tests.values() if test.get('hidden') is not True or test.get('result') != 'success']
+ testsuites = etree.Element('testsuites', attrib={k: str(v) for k, v in dict(
+ time=(suite_time - suite_start) / 1000.0 if suite_start is not None and suite_time is not None else None,
+ tests=str(len(visible_tests)),
+ failures=str(len([test for test in visible_tests if test.get('isFailure', False)])),
+ errors=str(len([test for test in visible_tests if not test.get('isFailure', True)])),
+ skipped=str(len([test for test in visible_tests if test.get('skipped', False)])),
+ ).items() if v is not None})
+
+ testsuites.extend([create_suite(suite, [test
+ for test in suite_tests[suite_id]
+ if test.get('hidden') is not True])
+ for suite_id, suite in suites.items()])
+
+ xml = etree.ElementTree(testsuites)
+ return xml
diff --git a/python/publish/github_action.py b/python/publish/github_action.py
new file mode 100644
index 0000000..5ae1764
--- /dev/null
+++ b/python/publish/github_action.py
@@ -0,0 +1,186 @@
+import logging
+import os
+import sys
+import traceback
+from io import TextIOWrapper
+from typing import Mapping, Any, Optional
+
+from publish import logger
+
+
+class GithubAction:
+
+ # GitHub Actions environment file variable names
+ # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files
+ ENV_FILE_VAR_NAME = 'GITHUB_ENV'
+ PATH_FILE_VAR_NAME = 'GITHUB_PATH'
+ OUTPUT_FILE_VAR_NAME = 'GITHUB_OUTPUT'
+ JOB_SUMMARY_FILE_VAR_NAME = 'GITHUB_STEP_SUMMARY'
+
+ def __init__(self, file: Optional[TextIOWrapper] = None):
+ if file is None:
+ file = sys.stdout
+ # pre Python 3.7, TextIOWrapper does not have reconfigure
+ if isinstance(file, TextIOWrapper) and hasattr(file, 'reconfigure'):
+ # ensure we have utf8 encoding, the default encoding of sys.stdout on Windows is cp1252
+ file.reconfigure(encoding='utf-8')
+
+ self._file: TextIOWrapper = file
+
+ def add_mask(self, value: str):
+ self._command(self._file, 'add-mask', value)
+
+ def stop_commands(self, end_token: str):
+ self._command(self._file, 'stop-commands', end_token)
+
+ def continue_commands(self, end_token: str):
+ self._command(self._file, end_token)
+
+ def group(self, title: str):
+ self._command(self._file, 'group', title)
+
+ def group_end(self):
+ self._command(self._file, 'endgroup')
+
+ def debug(self, message: str):
+ logger.debug(message)
+ self._command(self._file, 'debug', message)
+
+ def notice(self,
+ message: str,
+ title: Optional[str] = None,
+ file: Optional[str] = None,
+ line: Optional[int] = None,
+ end_line: Optional[int] = None,
+ column: Optional[int] = None,
+ end_column: Optional[int] = None):
+ logger.info(message)
+
+ params = {var: val
+ for var, val in [("title", title),
+ ("file", file),
+ ("col", column),
+ ("endColumn", end_column),
+ ("line", line),
+ ("endLine", end_line)]
+ if val is not None}
+ self._command(self._file, 'notice', message, params)
+
+ def warning(self, message: str, file: Optional[str] = None, line: Optional[int] = None, column: Optional[int] = None):
+ logger.warning(message)
+
+ params = {}
+ if file is not None:
+ params.update(file=file)
+ if line is not None:
+ params.update(line=line)
+ if column is not None:
+ params.update(col=column)
+ self._command(self._file, 'warning', message, params)
+
+ def _exception(self, te: traceback.TracebackException):
+ def exception_str(te: traceback.TracebackException) -> str:
+ # take the last line of the exception-only output and strip its trailing newline
+ return list(te.format_exception_only())[-1].split('\n')[0]
+
+ self.error('{te}{caused}{context}'.format(
+ te=exception_str(te),
+ caused=f' caused by {exception_str(te.__cause__)}' if te.__cause__ else '',
+ context=f' while handling {exception_str(te.__context__)}' if te.__context__ else ''
+ ), exception=None)
+
+ for lines in te.format(chain=False):
+ for line in lines.split('\n'):
+ if line:
+ logger.debug(line)
+
+ cause = te.__cause__
+ while cause:
+ self._exception(cause)
+ cause = cause.__cause__
+
+ context = te.__context__
+ while context:
+ self._exception(context)
+ context = context.__context__
+
+ def error(self,
+ message: str,
+ file: Optional[str] = None, line: Optional[int] = None, column: Optional[int] = None,
+ exception: Optional[BaseException] = None):
+ if exception:
+ self._exception(traceback.TracebackException.from_exception(exception))
+ else:
+ logger.error(message)
+
+ params = {}
+ if file is not None:
+ params.update(file=file)
+ if line is not None:
+ params.update(line=line)
+ if column is not None:
+ params.update(col=column)
+ self._command(self._file, 'error', message, params)
+
+ def echo(self, on: bool):
+ self._command(self._file, 'echo', 'on' if on else 'off')
+
+ @staticmethod
+ def _command(file: TextIOWrapper, command: str, value: str = '', params: Optional[Mapping[str, Any]] = None):
+ # take first line of value if multiline
+ value = value.split('\n', 1)[0]
+
+ if params is None:
+ params = {}
+ params = ','.join([f'{key}={str(value)}'
+ for key, value in params.items()])
+ params = f' {params}' if params else ''
+
+ try:
+ file.write(f'::{command}{params}::{value}')
+ file.write(os.linesep)
+ except Exception as e:
+ logging.error(f'Failed to forward command {command} to GithubActions: {e}')
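+ # For illustration, _command(file, 'warning', 'Oops', {'file': 'app.py', 'line': 1})
+ # writes the workflow command '::warning file=app.py,line=1::Oops'.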
+
+ def add_to_env(self, var: str, val: str):
+ if '\n' in val:
+ # if this is really needed, implement it as described here:
+ # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
+ raise ValueError('Multiline values not supported for environment variables')
+ self._append_to_file(f'{var}={val}\n', self.ENV_FILE_VAR_NAME)
+
+ def add_to_path(self, path: str):
+ self._append_to_file(f'{path}\n', self.PATH_FILE_VAR_NAME)
+
+ def add_to_output(self, var: str, val: str):
+ if '\n' in val:
+ # if this is really needed, implement it as described here:
+ # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
+ raise ValueError('Multiline values not supported for outputs')
+
+ if not self._append_to_file(f'{var}={val}\n', self.OUTPUT_FILE_VAR_NAME, warn=False):
+ # this has been deprecated but we fall back if there is no env file
+ self._command(self._file, 'set-output', val, {'name': var})
+
+ def add_to_job_summary(self, markdown: str):
+ self._append_to_file(markdown, self.JOB_SUMMARY_FILE_VAR_NAME)
+
+ def _append_to_file(self, content: str, env_file_var_name: str, warn: bool = True) -> bool:
+ # appends content to an environment file denoted by an environment variable name
+ # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files
+ filename = os.getenv(env_file_var_name)
+ if not filename:
+ if warn:
+ self.warning(f'Cannot append to environment file {env_file_var_name} as it is not set. '
+ f'See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files')
+ return False
+
+ try:
+ with open(filename, 'a', encoding='utf-8') as file:
+ file.write(content)
+ except Exception as e:
+ self.warning(f'Failed to write to environment file {filename}: {str(e)}. '
+ f'See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files')
+ return False
+
+ return True
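+# A hypothetical usage sketch (values made up):
+# gha = GithubAction()
+# gha.add_to_output('conclusion', 'success') # appends 'conclusion=success' to $GITHUB_OUTPUT
+# gha.notice('All tests passed', title='Tests') # emits '::notice title=Tests::All tests passed'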
diff --git a/python/publish/junit.py b/python/publish/junit.py
new file mode 100644
index 0000000..79a348c
--- /dev/null
+++ b/python/publish/junit.py
@@ -0,0 +1,346 @@
+import math
+import os
+from collections import defaultdict
+from typing import Optional, Iterable, Union, List, Dict, Callable, Tuple
+
+import junitparser
+from junitparser import Element, JUnitXml, JUnitXmlError, TestCase, TestSuite, Skipped
+from junitparser.junitparser import etree
+
+from publish.unittestresults import ParsedUnitTestResults, UnitTestSuite, UnitTestCase, ParseError
+
+try:
+ import lxml.etree
+ lxml_available = True
+except ImportError:
+ lxml_available = False
+
+
+def xml_has_root_element(path: str, allowed_root_elements: List[str]) -> bool:
+ try:
+ with open(path, 'rb') as r:
+ it = etree.iterparse(r, events=['start'])
+ action, elem = next(it, (None, None))
+ return action == 'start' and elem is not None and etree.QName(elem).localname in allowed_root_elements
+ except BaseException:
+ return False
+
+
+def is_junit(path: str) -> bool:
+ return xml_has_root_element(path, ['testsuites', 'testsuite'])
+
+
+def get_results(results: Union[Element, List[Element]], status: Optional[str] = None) -> List[Element]:
+ """
+ Returns the results with the most severe state.
+ For example: if there are failures and successful tests, returns only the failures.
+ """
+ if isinstance(results, List):
+ d = defaultdict(list)
+ for result in results:
+ if result:
+ d[get_result(result)].append(result)
+
+ for state in ['error', 'failure', 'success', 'skipped', 'disabled']:
+ if state in d:
+ return d[state]
+
+ if status and status in ['disabled']:
+ return [Disabled()]
+
+ return []
+
+ return [results]
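+# For example, given one failure and two successes for the same test,
+# get_results returns only the failure, since 'error' and 'failure' outrank
+# 'success', 'skipped' and 'disabled' in the state order above.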
+
+
+def get_result(results: Union[Element, List[Element]]) -> str:
+ """
+ Returns the result of the given results.
+ All results are expected to be of the same state.
+ :param results:
+ :return:
+ """
+ if isinstance(results, List):
+ return get_result(results[0]) if results else 'success'
+ return results._tag if results else 'success'
+
+
+def get_message(results: Union[Element, List[Element]]) -> Optional[str]:
+ """
+ Returns an aggregated message from all given results.
+ :param results:
+ :return:
+ """
+ if isinstance(results, List):
+ messages = [result.message
+ for result in results
+ if result and result.message]
+ message = '\n'.join(messages) if messages else None
+ else:
+ message = results.message if results else None
+ return message
+
+
+def get_content(results: Union[Element, List[Element]]) -> Optional[str]:
+ """
+ Returns the aggregated content from all given results.
+ :param results:
+ :return:
+ """
+ if isinstance(results, List):
+ contents = [result.text
+ for result in results
+ if result is not None and result.text is not None]
+ content = '\n'.join(contents) if contents else None
+ else:
+ content = results.text if results else None
+ return content
+
+
+class DropTestCaseBuilder(etree.TreeBuilder):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._stack = []
+
+ def start(self, tag: Union[str, bytes], attrs: Dict[Union[str, bytes], Union[str, bytes]]) -> Element:
+ self._stack.append(tag)
+ if junitparser.TestCase._tag not in self._stack:
+ return super().start(tag, attrs)
+
+ def end(self, tag: Union[str, bytes]) -> Element:
+ try:
+ if junitparser.TestCase._tag not in self._stack:
+ return super().end(tag)
+ finally:
+ if self._stack:
+ self._stack.pop()
+
+ def close(self) -> Element:
+ # when lxml is around, we have to return an ElementTree here, otherwise
+ # XMLParser(target=...).parse(..., parser=...)
+ # returns an Element, not a ElementTree, but junitparser expects an ElementTree
+ #
+ # https://lxml.de/parsing.html:
+ # Note that the parser does not build a tree when using a parser target. The result of the parser run is
+ # whatever the target object returns from its .close() method. If you want to return an XML tree here, you
+ # have to create it programmatically in the target object.
+ if lxml_available:
+ return lxml.etree.ElementTree(super().close())
+ else:
+ return super().close()
+
+
+JUnitTree = etree.ElementTree
+JUnitTreeOrParseError = Union[JUnitTree, ParseError]
+JUnitXmlOrParseError = Union[JUnitXml, ParseError]
+ParsedJUnitFile = Tuple[str, JUnitTreeOrParseError]
+
+
+def safe_parse_xml_file(path: str, parse: Callable[[str], JUnitTree]) -> JUnitTreeOrParseError:
+ """Parses an xml file and returns either a JUnitTree or a ParseError."""
+ if not os.path.exists(path):
+ return ParseError.from_exception(path, FileNotFoundError('File does not exist.'))
+ if os.stat(path).st_size == 0:
+ return ParseError.from_exception(path, Exception('File is empty.'))
+
+ try:
+ return parse(path)
+ except BaseException as e:
+ return ParseError.from_exception(path, e)
+
+
+def progress_safe_parse_xml_file(files: Iterable[str],
+ parse: Callable[[str], JUnitTree],
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile]) -> Iterable[ParsedJUnitFile]:
+ return [progress((file, safe_parse_xml_file(file, parse))) for file in files]
+
+
+def parse_junit_xml_file(path: str, large_files: bool, drop_testcases: bool) -> JUnitTree:
+ if drop_testcases:
+ builder = DropTestCaseBuilder()
+ parser = etree.XMLParser(target=builder, encoding='utf-8', huge_tree=large_files)
+ return etree.parse(path, parser=parser)
+ elif large_files:
+ parser = etree.XMLParser(huge_tree=True)
+ return etree.parse(path, parser=parser)
+ return etree.parse(path)
+
+
+def parse_junit_xml_files(files: Iterable[str], large_files: bool, drop_testcases: bool,
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile] = lambda x: x) -> Iterable[ParsedJUnitFile]:
+ """Parses junit xml files."""
+ def parse(path: str) -> JUnitTree:
+ return parse_junit_xml_file(path, large_files, drop_testcases)
+
+ return progress_safe_parse_xml_file(files, parse, progress)
+
+
+def adjust_prefix(file: Optional[str], prefix: Optional[str]) -> Optional[str]:
+ if prefix is None or file is None:
+ return file
+
+ # prefix starts either with '+' or '-'
+ if prefix.startswith('+'):
+ # add prefix
+ return "".join([prefix[1:], file])
+
+ # remove prefix
+ return file[len(prefix)-1:] if file.startswith(prefix[1:]) else file
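+# For illustration: adjust_prefix('test.py', '+src/') returns 'src/test.py',
+# and adjust_prefix('src/test.py', '-src/') returns 'test.py'.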
+
+
+def process_junit_xml_elems(trees: Iterable[ParsedJUnitFile],
+ *,
+ time_factor: float = 1.0,
+ test_file_prefix: Optional[str] = None,
+ add_suite_details: bool = False) -> ParsedUnitTestResults:
+ def create_junitxml(filepath: str, tree: JUnitTree) -> JUnitXmlOrParseError:
+ try:
+ instance = JUnitXml.fromroot(tree.getroot())
+ instance.filepath = filepath
+ return instance
+ except JUnitXmlError as e:
+ return ParseError.from_exception(filepath, e)
+
+ processed = [(result_file, create_junitxml(result_file, tree) if not isinstance(tree, ParseError) else tree)
+ for result_file, tree in trees]
+ junits = [(result_file, junit)
+ for result_file, junit in processed
+ if not isinstance(junit, ParseError)]
+ errors = [error
+ for _, error in processed
+ if isinstance(error, ParseError)]
+
+ suites = [(result_file, suite)
+ for result_file, junit in junits
+ for suite in (junit if junit._tag == "testsuites" else [junit])]
+
+ suite_tests = sum([suite.tests for result_file, suite in suites if suite.tests])
+ suite_skipped = sum([suite.skipped + suite.disabled for result_file, suite in suites if suite.skipped and not math.isnan(suite.skipped)])
+ suite_failures = sum([suite.failures for result_file, suite in suites if suite.failures and not math.isnan(suite.failures)])
+ suite_errors = sum([suite.errors for result_file, suite in suites if suite.errors and not math.isnan(suite.errors)])
+ suite_time = int(sum([suite.time for result_file, suite in suites
+ if suite.time and not math.isnan(suite.time)]) * time_factor)
+
+ def int_opt(string: Optional[str]) -> Optional[int]:
+ try:
+ return int(string) if string else None
+ except ValueError:
+ return None
+
+ def get_cases(suite: TestSuite) -> List[TestCase]:
+ """
+ JUnit allows for testsuite tags inside testsuite tags at any depth.
+ https://llg.cubic.org/docs/junit/
+
+ This recurses into nested testsuite tags and returns a flat list of all contained testcase tags.
+ """
+ suites = list(suite.iterchildren(TestSuite))
+ cases = list(suite.iterchildren(TestCase))
+ return [case
+ for suite in suites
+ for case in get_cases(suite)] + cases
+
+ def get_leaf_suites(suite: TestSuite) -> List[TestSuite]:
+ """
+ JUnit allows for testsuite tags inside testsuite tags at any depth.
+ https://llg.cubic.org/docs/junit/
+
+ This enumerates all leaf testsuite tags, as well as testsuite tags that directly contain testcase tags.
+ """
+ suites = list(suite.iterchildren(TestSuite))
+ cases = list(suite.iterchildren(TestCase))
+ return [leaf_suite
+ for suite in suites
+ for leaf_suite in get_leaf_suites(suite)] + ([suite] if cases or not suites else [])
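+ # For illustration (XML made up): given <testsuite name="outer"> containing a
+ # nested <testsuite name="inner"> with one testcase plus one direct testcase,
+ # get_cases(outer) returns both testcases, and get_leaf_suites(outer)
+ # returns [inner, outer], since outer directly contains a testcase.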
+
+ leaf_suites = [leaf_suite
+ for _, suite in suites
+ for leaf_suite in get_leaf_suites(suite)]
+
+ def get_text(elem, tag):
+ child = elem.find(tag)
+ if child is not None:
+ text = child.text.strip() if child.text else None
+ return text if text else None
+ return None
+
+ suite_details = [
+ UnitTestSuite(
+ leaf_suite.name,
+ leaf_suite.tests,
+ leaf_suite.skipped,
+ leaf_suite.failures,
+ leaf_suite.errors,
+ get_text(leaf_suite._elem, 'system-out'),
+ get_text(leaf_suite._elem, 'system-err'),
+ )
+ for leaf_suite in leaf_suites
+ ] if add_suite_details else []
+
+ # junit allows for multiple results for a single test case (e.g. success and failure for the same test)
+ # we pick the most severe result, which could still be multiple results, so we aggregate those, which is messy
+ cases = [
+ UnitTestCase(
+ result_file=result_file,
+ test_file=adjust_prefix(case._elem.get('file'), test_file_prefix),
+ line=int_opt(case._elem.get('line')),
+ class_name=case.classname,
+ test_name=case.name,
+ result=get_result(results),
+ message=get_message(results),
+ content=get_content(results),
+ stdout=case.system_out,
+ stderr=case.system_err,
+ time=case.time * time_factor if case.time is not None else case.time
+ )
+ for result_file, suite in suites
+ for case in get_cases(suite)
+ if case.classname is not None or case.name is not None
+ # junit allows for multiple results in one test case, pick the most severe results
+ for results in [get_results(case.result, case.status)]
+ ]
+
+ return ParsedUnitTestResults(
+ files=len(list(trees)),
+ errors=errors,
+ # test state counts from suites
+ suites=len(leaf_suites),
+ suite_tests=suite_tests,
+ suite_skipped=suite_skipped,
+ suite_failures=suite_failures,
+ suite_errors=suite_errors,
+ suite_time=suite_time,
+ suite_details=suite_details,
+ # test cases
+ cases=cases
+ )
+
+
+@property
+def disabled(self) -> int:
+ disabled = self._elem.get('disabled', '0')
+ if disabled.isnumeric():
+ return int(disabled)
+ return 0
+
+
+# add special type of test case result to TestSuite
+TestSuite.disabled = disabled
+
+
+@property
+def status(self) -> str:
+ return self._elem.get('status')
+
+
+# special attribute of TestCase
+TestCase.status = status
+
+
+class Disabled(Skipped):
+ """Test result when the test is disabled."""
+
+ _tag = "disabled"
+
+ def __eq__(self, other):
+ return super(Disabled, self).__eq__(other)
diff --git a/python/publish/mocha.py b/python/publish/mocha.py
new file mode 100644
index 0000000..bd9d6a3
--- /dev/null
+++ b/python/publish/mocha.py
@@ -0,0 +1,75 @@
+import json
+
+from junitparser.junitparser import etree
+
+from publish.junit import JUnitTree
+
+
+def is_mocha_json(path: str) -> bool:
+ if not path.endswith('.json'):
+ return False
+
+ try:
+ with open(path, 'rt') as r:
+ results = json.load(r)
+ return 'stats' in results and isinstance(results.get('stats'), dict) and 'suites' in results.get('stats') and \
+ 'tests' in results and isinstance(results.get('tests'), list) and all(isinstance(test, dict) for test in results.get('tests')) and (
+ len(results.get('tests')) == 0 or all(test.get('fullTitle') for test in results.get('tests'))
+ )
+ except BaseException:
+ return False
+
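+# A minimal Mocha JSON report this detector accepts might look like (illustrative):
+# {"stats": {"suites": 1, "duration": 5},
+# "tests": [{"fullTitle": "suite test", "duration": 3}],
+# "pending": [], "failures": []}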
+
+def parse_mocha_json_file(path: str) -> JUnitTree:
+ with open(path, 'rt') as r:
+ results = json.load(r)
+
+ stats = results.get('stats', {})
+ skippedTests = {test.get('fullTitle') for test in results.get('pending', [])}
+ suite = etree.Element('testsuite', attrib={k: str(v) for k, v in dict(
+ time=stats.get('duration'),
+ timestamp=stats.get('start')
+ ).items() if v})
+
+ tests = 0
+ failures = 0
+ errors = 0
+ skipped = 0
+ for test in results.get('tests', []):
+ tests = tests + 1
+ testcase = etree.Element('testcase',
+ attrib={k: str(v) for k, v in dict(
+ name=test.get('fullTitle'),
+ file=test.get('file'),
+ time=test.get('duration')
+ ).items() if v}
+ )
+
+ err = test.get('err')
+ if err:
+ if err.get('errorMode'):
+ errors = errors + 1
+ type = 'error'
+ else:
+ failures = failures + 1
+ type = 'failure'
+
+ result = etree.Element(type, attrib={k: v for k, v in dict(
+ message=(err.get('message') or '').translate(dict.fromkeys(range(32))),
+ type=err.get('errorMode')
+ ).items() if v})
+ result.text = etree.CDATA('\n'.join(text.translate(dict.fromkeys(range(32)))
+ for text in [err.get('name'), err.get('message'), err.get('stack')]
+ if text))
+ testcase.append(result)
+ elif test.get('fullTitle') in skippedTests:
+ skipped = skipped + 1
+ result = etree.Element('skipped')
+ testcase.append(result)
+
+ suite.append(testcase)
+
+ suite.attrib.update(dict(tests=str(tests), failures=str(failures), errors=str(errors), skipped=str(skipped)))
+ xml = etree.ElementTree(suite)
+
+ return xml
diff --git a/python/publish/nunit.py b/python/publish/nunit.py
new file mode 100644
index 0000000..8708bf2
--- /dev/null
+++ b/python/publish/nunit.py
@@ -0,0 +1,31 @@
+import pathlib
+from typing import Iterable, Callable
+
+from lxml import etree
+
+from publish.junit import JUnitTree, ParsedJUnitFile, progress_safe_parse_xml_file, xml_has_root_element
+
+with (pathlib.Path(__file__).resolve().parent / 'xslt' / 'nunit3-to-junit.xslt').open('r', encoding='utf-8') as r:
+ transform_nunit_to_junit = etree.XSLT(etree.parse(r), regexp=False, access_control=etree.XSLTAccessControl.DENY_ALL)
+
+
+def is_nunit(path: str) -> bool:
+ return xml_has_root_element(path, ['test-results', 'test-run', 'test-suite'])
+
+
+def parse_nunit_file(path: str, large_files: bool) -> JUnitTree:
+ if large_files:
+ parser = etree.XMLParser(huge_tree=True)
+ nunit = etree.parse(path, parser=parser)
+ else:
+ nunit = etree.parse(path)
+ return transform_nunit_to_junit(nunit)
+
+
+def parse_nunit_files(files: Iterable[str], large_files: bool,
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile] = lambda x: x) -> Iterable[ParsedJUnitFile]:
+ """Parses nunit files."""
+ def parse(path: str) -> JUnitTree:
+ return parse_nunit_file(path, large_files)
+
+ return progress_safe_parse_xml_file(files, parse, progress)
diff --git a/python/publish/progress.py b/python/publish/progress.py
new file mode 100644
index 0000000..4137e84
--- /dev/null
+++ b/python/publish/progress.py
@@ -0,0 +1,86 @@
+import contextlib
+from datetime import datetime
+from logging import Logger
+from threading import Timer
+from typing import Generic, TypeVar, Optional, Callable, Type, Any
+
+import humanize
+
+from publish import punctuation_space
+
+T = TypeVar('T')
+
+
+@contextlib.contextmanager
+def progress_logger(items: int,
+ interval_seconds: int,
+ progress_template: str,
+ finish_template: Optional[str],
+ logger: Logger,
+ progress_item_type: Type[T] = Any) -> Callable[[T], T]:
+ progress = Progress[progress_item_type](items)
+ plogger = ProgressLogger(progress, interval_seconds, progress_template, logger).start()
+ try:
+ yield progress.observe
+ finally:
+ plogger.finish(finish_template)
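+# A hypothetical usage sketch (templates, files and logger made up):
+# with progress_logger(items=10, interval_seconds=60,
+# progress_template='parsed {progress} files after {time}',
+# finish_template='parsed {items} files in {duration}',
+# logger=logger) as observe:
+# for file in files:
+# observe(file)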
+
+
+class Progress(Generic[T]):
+ def __init__(self, items: int):
+ self.items = items
+ self.observations = 0
+
+ def observe(self, observation: T) -> T:
+ self.observations = self.observations + 1
+ return observation
+
+ def get_progress(self) -> str:
+ return '{observations:,} of {items:,}'.format(
+ observations=self.observations, items=self.items
+ ).replace(',', punctuation_space)
+
+
+class ProgressLogger:
+ def __init__(self, progress: Progress, interval_seconds: int, template: str, logger: Logger):
+ self._progress = progress
+ self._interval_seconds = interval_seconds
+ self._template = template
+ self._logger = logger
+
+ self._start = None
+ self._duration = None
+ self._timer = self._get_progress_timer()
+
+ def start(self) -> 'ProgressLogger':
+ self._start = datetime.utcnow()
+ self._timer.start()
+ return self
+
+ def finish(self, template: Optional[str] = None):
+ self._duration = datetime.utcnow() - self._start
+ self._start = None
+ self._timer.cancel()
+
+ if template:
+ self._logger.info(template.format(items=self._progress.items,
+ observations=self._progress.observations,
+ duration=self.duration))
+
+ @property
+ def duration(self) -> str:
+ return humanize.precisedelta(self._duration)
+
+ def _get_progress_timer(self):
+ timer = Timer(self._interval_seconds, self._log_progress)
+ timer.daemon = True
+ return timer
+
+ def _log_progress(self):
+ if self._start is None:
+ return
+
+ delta = datetime.utcnow() - self._start
+ self._logger.info(self._template.format(progress=self._progress.get_progress(), time=humanize.precisedelta(delta)))
+ self._timer = self._get_progress_timer()
+ self._timer.start()
diff --git a/python/publish/publisher.py b/python/publish/publisher.py
new file mode 100644
index 0000000..ca96127
--- /dev/null
+++ b/python/publish/publisher.py
@@ -0,0 +1,740 @@
+import dataclasses
+import json
+import logging
+import os
+import re
+from dataclasses import dataclass
+from typing import List, Set, Any, Optional, Tuple, Mapping, Dict, Union, Callable
+from copy import deepcopy
+
+from github import Github, GithubException, UnknownObjectException
+from github.CheckRun import CheckRun
+from github.CheckRunAnnotation import CheckRunAnnotation
+from github.PullRequest import PullRequest
+from github.IssueComment import IssueComment
+
+from publish import __version__, get_json_path, comment_mode_off, digest_prefix, restrict_unicode_list, \
+ comment_mode_always, comment_mode_changes, comment_mode_changes_failures, comment_mode_changes_errors, \
+ comment_mode_failures, comment_mode_errors, \
+ get_stats_from_digest, digest_header, get_short_summary, get_long_summary_md, \
+ get_long_summary_with_digest_md, get_error_annotations, get_case_annotations, get_suite_annotations, \
+ get_all_tests_list_annotation, get_skipped_tests_list_annotation, get_all_tests_list, \
+ get_skipped_tests_list, all_tests_list, skipped_tests_list, pull_request_build_mode_merge, \
+ Annotation, SomeTestChanges
+from publish import logger
+from publish.github_action import GithubAction
+from publish.unittestresults import UnitTestCaseResults, UnitTestRunResults, UnitTestRunDeltaResults, \
+ UnitTestRunResultsOrDeltaResults, get_stats_delta, create_unit_test_case_results
+
+
+@dataclass(frozen=True)
+class Settings:
+ token: str
+ actor: str
+ api_url: str
+ graphql_url: str
+ api_retries: int
+ event: dict
+ event_file: Optional[str]
+ event_name: str
+ is_fork: bool
+ repo: str
+ commit: str
+ json_file: Optional[str]
+ json_thousands_separator: str
+ json_suite_details: bool
+ json_test_case_results: bool
+ fail_on_errors: bool
+ fail_on_failures: bool
+ action_fail: bool
+ action_fail_on_inconclusive: bool
+ # one of these *files_glob must be set
+ files_glob: Optional[str]
+ junit_files_glob: Optional[str]
+ nunit_files_glob: Optional[str]
+ xunit_files_glob: Optional[str]
+ trx_files_glob: Optional[str]
+ test_file_prefix: Optional[str]
+ time_factor: float
+ check_name: str
+ comment_title: str
+ comment_mode: str
+ job_summary: bool
+ compare_earlier: bool
+ pull_request_build: str
+ test_changes_limit: int
+ report_individual_runs: bool
+ report_suite_out_logs: bool
+ report_suite_err_logs: bool
+ dedup_classes_by_file_name: bool
+ large_files: bool
+ ignore_runs: bool
+ check_run_annotation: List[str]
+ seconds_between_github_reads: float
+ seconds_between_github_writes: float
+ secondary_rate_limit_wait_seconds: float
+ search_pull_requests: bool
+
+
+@dataclasses.dataclass(frozen=True)
+class PublishData:
+ title: str
+ summary: str
+ conclusion: str
+ stats: UnitTestRunResults
+ stats_with_delta: Optional[UnitTestRunDeltaResults]
+ annotations: List[Annotation]
+ check_url: str
+ cases: Optional[UnitTestCaseResults]
+
+ def without_exceptions(self) -> 'PublishData':
+ return dataclasses.replace(
+ self,
+ # remove exceptions
+ stats=self.stats.without_exceptions(),
+ stats_with_delta=self.stats_with_delta.without_exceptions() if self.stats_with_delta else None,
+ # turn defaultdict into simple dict
+ cases={test: {state: cases for state, cases in states.items()}
+ for test, states in self.cases.items()} if self.cases else None
+ )
+
+ def without_suite_details(self) -> 'PublishData':
+ return dataclasses.replace(self, stats=self.stats.without_suite_details())
+
+ def without_cases(self) -> 'PublishData':
+ return dataclasses.replace(self, cases=None)
+
+ @classmethod
+ def _format_digit(cls, value: Union[int, Mapping[str, int], Any], thousands_separator: str) -> Union[str, Mapping[str, str], Any]:
+ if isinstance(value, int):
+ return f'{value:,}'.replace(',', thousands_separator)
+ if isinstance(value, Mapping):
+ return {k: cls._format_digit(v, thousands_separator) for (k, v) in value.items()}
+ return value
+
+ @classmethod
+ def _format(cls, stats: Mapping[str, Any], thousands_separator: str) -> Dict[str, Any]:
+ return {k: cls._format_digit(v, thousands_separator) for (k, v) in stats.items()}
+
+ @classmethod
+ def _formatted_stats_and_delta(cls,
+ stats: Optional[Mapping[str, Any]],
+ stats_with_delta: Optional[Mapping[str, Any]],
+ thousands_separator: str) -> Mapping[str, Any]:
+ d = {}
+ if stats is not None:
+ d.update(stats=cls._format(stats, thousands_separator))
+ if stats_with_delta is not None:
+ d.update(stats_with_delta=cls._format(stats_with_delta, thousands_separator))
+ return d
+
+ def _as_dict(self) -> Dict[str, Any]:
+ # the dict_factory removes None values
+ return dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})
+
+ def to_dict(self, thousands_separator: str, with_suite_details: bool, with_cases: bool) -> Mapping[str, Any]:
+ data = self.without_exceptions()
+ if not with_suite_details:
+ data = data.without_suite_details()
+ if not with_cases:
+ data = data.without_cases()
+ d = data._as_dict()
+
+ # beautify cases, turn tuple-key into proper fields
+ if d.get('cases'):
+ d['cases'] = [{k: v for k, v in [('file_name', test[0]),
+ ('class_name', test[1]),
+ ('test_name', test[2]),
+ ('states', states)]
+ if v}
+ for test, states in d['cases'].items()]
+
+ # provide formatted stats and delta
+ d.update(formatted=self._formatted_stats_and_delta(
+ d.get('stats'), d.get('stats_with_delta'), thousands_separator
+ ))
+
+ return d
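+ # For illustration, a cases key ('file.py', 'Class', 'test') becomes
+ # {'file_name': 'file.py', 'class_name': 'Class', 'test_name': 'test', 'states': {...}}.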
+
+ def to_reduced_dict(self, thousands_separator: str) -> Mapping[str, Any]:
+ # remove exceptions, suite details and cases
+ data = self.without_exceptions().without_suite_details().without_cases()._as_dict()
+
+ # replace some large fields with their lengths and delete individual test cases if present
+ def reduce(d: Dict[str, Any]) -> Dict[str, Any]:
+ d = deepcopy(d)
+ if d.get('stats', {}).get('errors') is not None:
+ d['stats']['errors'] = len(d['stats']['errors'])
+ if d.get('stats_with_delta', {}).get('errors') is not None:
+ d['stats_with_delta']['errors'] = len(d['stats_with_delta']['errors'])
+ if d.get('annotations') is not None:
+ d['annotations'] = len(d['annotations'])
+ return d
+
+ data = reduce(data)
+ data.update(formatted=self._formatted_stats_and_delta(
+ data.get('stats'), data.get('stats_with_delta'), thousands_separator
+ ))
+
+ return data
+
+
+class Publisher:
+
+ def __init__(self, settings: Settings, gh: Github, gha: GithubAction):
+ self._settings = settings
+ self._gh = gh
+ self._gha = gha
+ self._repo = gh.get_repo(self._settings.repo)
+ self._req = gh._Github__requester
+
+ def publish(self,
+ stats: UnitTestRunResults,
+ cases: UnitTestCaseResults,
+ conclusion: str):
+ logger.info(f'Publishing {conclusion} results for commit {self._settings.commit}')
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug(f'Publishing {stats}')
+
+ if self._settings.is_fork:
+ # running on a fork, we cannot publish the check, but we can still read before_check_run
+ # bump the version in this link if you change its target or the referenced section
+ logger.info('This action is running on a pull_request event for a fork repository. '
+ 'Pull request comments and check runs cannot be created, so disabling these features. '
+ 'To fully run the action on fork repository pull requests, see '
+ f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches')
+ check_run = None
+ before_check_run = None
+ if self._settings.compare_earlier:
+ before_commit_sha = get_json_path(self._settings.event, 'before')
+ logger.debug(f'comparing against before={before_commit_sha}')
+ before_check_run = self.get_check_run(before_commit_sha)
+ else:
+ check_run, before_check_run = self.publish_check(stats, cases, conclusion)
+
+ if self._settings.job_summary:
+ self.publish_job_summary(self._settings.comment_title, stats, check_run, before_check_run)
+
+ if not self._settings.is_fork:
+ if self._settings.comment_mode != comment_mode_off:
+ pulls = self.get_pulls(self._settings.commit)
+ if pulls:
+ for pull in pulls:
+ self.publish_comment(self._settings.comment_title, stats, pull, check_run, cases)
+ else:
+ logger.info(f'There is no pull request for commit {self._settings.commit}')
+ else:
+ logger.info('Commenting on pull requests disabled')
+
+ def get_pull_from_event(self) -> Optional[PullRequest]:
+ number = get_json_path(self._settings.event, 'pull_request.number')
+ repo = get_json_path(self._settings.event, 'pull_request.base.repo.full_name')
+ if number is None or repo is None or repo != self._settings.repo:
+ return None
+
+ try:
+ return self._repo.get_pull(number)
+ except UnknownObjectException:
+ return None
+
+ def get_pulls_from_commit(self, commit: str) -> List[PullRequest]:
+ try:
+ # totalCount of PaginatedList calls the GitHub API just to get the total number
+ # we have to retrieve them all anyway so better do this once by materialising the PaginatedList via list()
+ return list(self._repo.get_commit(commit).get_pulls())
+ except UnknownObjectException:
+ return []
+
+ def get_all_pulls(self, commit: str) -> List[PullRequest]:
+ if self._settings.search_pull_requests:
+ # totalCount of PaginatedList calls the GitHub API just to get the total number
+ # we have to retrieve them all anyway so better do this once by materialising the PaginatedList via list()
+ issues = list(self._gh.search_issues(f'type:pr repo:"{self._settings.repo}" {commit}'))
+ pull_requests = [issue.as_pull_request() for issue in issues]
+ else:
+ pull_request = self.get_pull_from_event()
+ pull_requests = [pull_request] if pull_request is not None else self.get_pulls_from_commit(commit)
+
+ logger.debug(f'found {len(pull_requests)} pull requests in repo {self._settings.repo} containing commit {commit}')
+ return pull_requests
+
+ def get_pulls(self, commit: str) -> List[PullRequest]:
+ # get all pull requests associated with this commit
+ # TODO: simplify to event pr only, breaking change for version 3.0
+ pull_requests = self.get_all_pulls(commit)
+
+ if logger.isEnabledFor(logging.DEBUG):
+ for pr in pull_requests:
+ logger.debug(pr)
+ logger.debug(pr.raw_data)
+ logger.debug(f'PR {pr.html_url}: {pr.head.repo.full_name} -> {pr.base.repo.full_name}')
+
+ # we can only publish the comment to PRs that are in the same repository as this action is executed in
+ # so pr.base.repo.full_name must be same as GITHUB_REPOSITORY / self._settings.repo
+ # we won't have permission otherwise
+ pulls = list([pr
+ for pr in pull_requests
+ if pr.base.repo.full_name == self._settings.repo])
+
+ if len(pulls) == 0:
+ logger.debug(f'found no pull requests in repo {self._settings.repo} for commit {commit}')
+ return []
+
+ # we only comment on PRs that have the commit as their current head or merge commit
+ pulls = [pull for pull in pulls if commit in [pull.head.sha, pull.merge_commit_sha]]
+ if len(pulls) == 0:
+ logger.debug(f'found no pull request in repo {self._settings.repo} with '
+ f'commit {commit} as current head or merge commit')
+ return []
+
+ # only comment on the open PRs
+ pulls = [pull for pull in pulls if pull.state == 'open']
+ if len(pulls) == 0:
+ logger.debug(f'found pull requests in repo {self._settings.repo} with '
+ f'commit {commit} as current head or merge commit, but none is open')
+
+ for pull in pulls:
+ logger.debug(f'found open pull request #{pull.number} with commit {commit} as current head or merge commit')
+ return pulls
+
+ def get_stats_from_commit(self, commit_sha: str) -> Optional[UnitTestRunResults]:
+ check_run = self.get_check_run(commit_sha)
+ return self.get_stats_from_check_run(check_run) if check_run is not None else None
+
+ def get_check_run(self, commit_sha: str) -> Optional[CheckRun]:
+ if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000':
+ return None
+
+ commit = None
+ try:
+ commit = self._repo.get_commit(commit_sha)
+ except GithubException as e:
+ if e.status == 422:
+ self._gha.warning(str(e.data))
+ else:
+ raise
+
+ if commit is None:
+ self._gha.error(f'Could not find commit {commit_sha}')
+ return None
+
+ runs = commit.get_check_runs()
+ # totalCount calls the GitHub API, so better not do this if we are not logging the result anyway
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug(f'found {runs.totalCount} check runs for commit {commit_sha}')
+
+ return self.get_check_run_from_list(list(runs))
+
+ def get_check_run_from_list(self, runs: List[CheckRun]) -> Optional[CheckRun]:
+ # filter for runs with the same name as configured
+ runs = [run for run in runs if run.name == self._settings.check_name]
+ logger.debug(f'there are {len(runs)} check runs with title {self._settings.check_name}')
+ if len(runs) == 0:
+ return None
+ if len(runs) == 1:
+ return runs[0]
+
+ # filter based on summary
+ runs = [run for run in runs if run.output.summary and digest_prefix in run.output.summary]
+ logger.debug(f'there are {len(runs)} check runs with a test result summary')
+ if len(runs) == 0:
+ return None
+ if len(runs) == 1:
+ return runs[0]
+
+ # filter for completed runs
+ runs = [run for run in runs if run.status == 'completed']
+ logger.debug(f'there are {len(runs)} check runs with completed status')
+ if len(runs) == 0:
+ return None
+ if len(runs) == 1:
+ return runs[0]
+
+ # pick run that started latest
+ return sorted(runs, key=lambda run: run.started_at, reverse=True)[0]
+
+ @staticmethod
+ def get_stats_from_check_run(check_run: CheckRun) -> Optional[UnitTestRunResults]:
+ summary = check_run.output.summary
+ if summary is None:
+ return None
+ for line in summary.split('\n'):
+ logger.debug(f'summary: {line}')
+
+ return Publisher.get_stats_from_summary_md(summary)
+
+ @staticmethod
+ def get_stats_from_summary_md(summary: str) -> Optional[UnitTestRunResults]:
+ start = summary.index(digest_header) if digest_header in summary else None
+ if start is not None:
+ digest = summary[start + len(digest_header):]
+ end = digest.index('\n') if '\n' in digest else None
+ if end is not None:
+ digest = digest[:end]
+ logger.debug(f'digest: {digest}')
+ stats = get_stats_from_digest(digest)
+ logger.debug(f'stats: {stats}')
+ return stats
+
+ @staticmethod
+ def get_test_list_from_annotation(annotation: CheckRunAnnotation) -> Optional[List[str]]:
+ if annotation is None or not annotation.raw_details:
+ return None
+ return annotation.raw_details.split('\n')
+
+ def publish_check(self,
+ stats: UnitTestRunResults,
+ cases: UnitTestCaseResults,
+ conclusion: str) -> Tuple[CheckRun, Optional[CheckRun]]:
+ # get stats from earlier commits
+ before_stats = None
+ before_check_run = None
+ if self._settings.compare_earlier:
+ before_commit_sha = get_json_path(self._settings.event, 'before')
+ logger.debug(f'comparing against before={before_commit_sha}')
+ before_check_run = self.get_check_run(before_commit_sha)
+ before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None
+ stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats
+ logger.debug(f'stats with delta: {stats_with_delta}')
+
+ error_annotations = get_error_annotations(stats.errors)
+ case_annotations = get_case_annotations(cases, self._settings.report_individual_runs)
+ output_annotations = get_suite_annotations(stats.suite_details, self._settings.report_suite_out_logs, self._settings.report_suite_err_logs)
+ test_list_annotations = self.get_test_list_annotations(cases)
+ all_annotations = error_annotations + case_annotations + output_annotations + test_list_annotations
+
+ title = get_short_summary(stats)
+ summary = get_long_summary_md(stats_with_delta)
+
+ # we can send only 50 annotations at once, so we split them into chunks of 50
+ check_run = None
+ summary_with_digest = get_long_summary_with_digest_md(stats_with_delta, stats)
+ split_annotations = [annotation.to_dict() for annotation in all_annotations]
+ split_annotations = [split_annotations[x:x+50] for x in range(0, len(split_annotations), 50)] or [[]]
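+ # illustrative: 120 annotations are published as chunks of 50, 50 and 20;
+ # with no annotations at all, the single empty chunk still creates the check run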
+ for annotations in split_annotations:
+ output = dict(
+ title=title,
+ summary=summary_with_digest,
+ annotations=annotations
+ )
+
+ if check_run is None:
+ logger.debug(f'creating check with {len(annotations)} annotations')
+ check_run = self._repo.create_check_run(name=self._settings.check_name,
+ head_sha=self._settings.commit,
+ status='completed',
+ conclusion=conclusion,
+ output=output)
+ logger.info(f'Created check {check_run.html_url}')
+ else:
+ logger.debug(f'updating check with {len(annotations)} more annotations')
+ check_run.edit(output=output)
+ logger.debug('updated check')
+
+ # create full json
+ data = PublishData(
+ title=title,
+ summary=summary,
+ conclusion=conclusion,
+ stats=stats,
+ stats_with_delta=stats_with_delta if before_stats is not None else None,
+ annotations=all_annotations,
+ check_url=check_run.html_url,
+ cases=cases
+ )
+ self.publish_json(data)
+
+ return check_run, before_check_run
+
+ def publish_json(self, data: PublishData):
+ if self._settings.json_file:
+ try:
+ with open(self._settings.json_file, 'wt', encoding='utf-8') as w:
+ json.dump(data.to_dict(
+ self._settings.json_thousands_separator,
+ self._settings.json_suite_details,
+ self._settings.json_test_case_results
+ ), w, ensure_ascii=False)
+ except Exception as e:
+ self._gha.error(f'Failed to write JSON file {self._settings.json_file}: {str(e)}')
+ try:
+ os.unlink(self._settings.json_file)
+ except Exception:
+ pass
+
+ # provide a reduced version to Github actions
+ self._gha.add_to_output('json', json.dumps(data.to_reduced_dict(self._settings.json_thousands_separator), ensure_ascii=False))
+
+ def publish_job_summary(self,
+ title: str,
+ stats: UnitTestRunResults,
+ check_run: CheckRun,
+ before_check_run: Optional[CheckRun]):
+ before_stats = self.get_stats_from_check_run(before_check_run) if before_check_run is not None else None
+ stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats
+
+ details_url = check_run.html_url if check_run else None
+ summary = get_long_summary_md(stats_with_delta, details_url)
+ markdown = f'## {title}\n{summary}'
+ self._gha.add_to_job_summary(markdown)
+ logger.info('Created job summary')
+
+ @staticmethod
+ def get_test_lists_from_check_run(check_run: Optional[CheckRun]) -> Tuple[Optional[List[str]], Optional[List[str]]]:
+ if check_run is None:
+ return None, None
+
+ all_tests_title_regexp = re.compile(r'^\d+ test(s)? found( \(test \d+ to \d+\))?$')
+ skipped_tests_title_regexp = re.compile(r'^\d+ skipped test(s)? found( \(test \d+ to \d+\))?$')
+
+ all_tests_message_regexp = re.compile(
+ r'^(There is 1 test, see "Raw output" for the name of the test)|'
+ r'(There are \d+ tests, see "Raw output" for the full list of tests)|'
+ r'(There are \d+ tests, see "Raw output" for the list of tests \d+ to \d+)\.$')
+ skipped_tests_message_regexp = re.compile(
+ r'^(There is 1 skipped test, see "Raw output" for the name of the skipped test)|'
+ r'(There are \d+ skipped tests, see "Raw output" for the full list of skipped tests)|'
+ r'(There are \d+ skipped tests, see "Raw output" for the list of skipped tests \d+ to \d+)\.$')
+
+ annotations = list(check_run.get_annotations())
+ all_tests_list = Publisher.get_test_list_from_annotations(annotations, all_tests_title_regexp, all_tests_message_regexp)
+ skipped_tests_list = Publisher.get_test_list_from_annotations(annotations, skipped_tests_title_regexp, skipped_tests_message_regexp)
+
+ return all_tests_list or None, skipped_tests_list or None
+
+ @staticmethod
+ def get_test_list_from_annotations(annotations: List[CheckRunAnnotation],
+ title_regexp, message_regexp) -> List[str]:
+ test_annotations: List[CheckRunAnnotation] = []
+
+ for annotation in annotations:
+ if annotation and annotation.title and annotation.message and annotation.raw_details and \
+ title_regexp.match(annotation.title) and \
+ message_regexp.match(annotation.message):
+ test_annotations.append(annotation)
+
+ test_lists = [Publisher.get_test_list_from_annotation(test_annotation)
+ for test_annotation in test_annotations]
+ test_list = [test
+ for test_list in test_lists
+ if test_list
+ for test in test_list]
+ return test_list
+
+ def get_test_list_annotations(self, cases: UnitTestCaseResults, max_chunk_size: int = 64000) -> List[Annotation]:
+ all_tests = get_all_tests_list_annotation(cases, max_chunk_size) \
+ if all_tests_list in self._settings.check_run_annotation else []
+ skipped_tests = get_skipped_tests_list_annotation(cases, max_chunk_size) \
+ if skipped_tests_list in self._settings.check_run_annotation else []
+ return [annotation for annotation in skipped_tests + all_tests if annotation]
+
+ def publish_comment(self,
+ title: str,
+ stats: UnitTestRunResults,
+ pull_request: PullRequest,
+ check_run: Optional[CheckRun] = None,
+ cases: Optional[UnitTestCaseResults] = None):
+ # compare them with earlier stats
+ base_check_run = None
+ if self._settings.compare_earlier:
+ base_commit_sha = self.get_base_commit_sha(pull_request)
+ if stats.commit == base_commit_sha:
+ # we do not publish a comment when comparing the commit to itself,
+ # as that would overwrite the earlier comment without any change in stats
+ return pull_request
+ logger.debug(f'comparing against base={base_commit_sha}')
+ base_check_run = self.get_check_run(base_commit_sha)
+ base_stats = self.get_stats_from_check_run(base_check_run) if base_check_run is not None else None
+ stats_with_delta = get_stats_delta(stats, base_stats, 'base') if base_stats is not None else stats
+ logger.debug(f'stats with delta: {stats_with_delta}')
+
+ # gather test lists from check run and cases
+ before_all_tests, before_skipped_tests = self.get_test_lists_from_check_run(base_check_run)
+ all_tests, skipped_tests = get_all_tests_list(cases), get_skipped_tests_list(cases)
+ # 'before' test names are retrieved from check runs, which have restricted unicode
+ # so we have to apply the same restriction to the test names retrieved from cases, so that they match
+ all_tests, skipped_tests = restrict_unicode_list(all_tests), restrict_unicode_list(skipped_tests)
+ test_changes = SomeTestChanges(before_all_tests, all_tests, before_skipped_tests, skipped_tests)
+
+ latest_comment = self.get_latest_comment(pull_request)
+ latest_comment_body = latest_comment.body if latest_comment else None
+
+ # are we required to create a comment on this PR?
+ earlier_stats = self.get_stats_from_summary_md(latest_comment_body) if latest_comment_body else None
+ if not self.require_comment(stats_with_delta, earlier_stats):
+ logger.info(f'No pull request comment required as comment mode is {self._settings.comment_mode} (comment_mode)')
+ return
+
+ details_url = check_run.html_url if check_run else None
+ summary = get_long_summary_with_digest_md(stats_with_delta, stats, details_url, test_changes, self._settings.test_changes_limit)
+ body = f'## {title}\n{summary}'
+
+ # only create a new comment if none exists already
+ if latest_comment is None:
+ comment = pull_request.create_issue_comment(body)
+ logger.info(f'Created comment for pull request #{pull_request.number}: {comment.html_url}')
+ else:
+ self.reuse_comment(latest_comment, body)
+ logger.info(f'Edited comment for pull request #{pull_request.number}: {latest_comment.html_url}')
+
+ def require_comment(self,
+ stats: UnitTestRunResultsOrDeltaResults,
+ earlier_stats: Optional[UnitTestRunResults]) -> bool:
+ # SomeTestChanges.has_changes cannot be used here as changes between earlier comment
+ # and current results cannot be identified
+
+ if self._settings.comment_mode == comment_mode_always:
+ logger.debug(f'Comment required as comment mode is {self._settings.comment_mode}')
+ return True
+
+ # helper method to detect if changes require a comment
+ def do_changes_require_comment(earlier_stats_is_different_to: Optional[Callable[[UnitTestRunResultsOrDeltaResults], bool]],
+ stats_has_changes: bool,
+ flavour: str = '') -> bool:
+ in_flavour = ''
+ if flavour:
+ in_flavour = f'in {flavour} '
+ flavour = f'{flavour} '
+
+ if earlier_stats is not None and earlier_stats_is_different_to(stats):
+ logger.info(f'Comment required as comment mode is "{self._settings.comment_mode}" '
+ f'and {flavour}statistics are different to earlier comment')
+ logger.debug(f'earlier: {earlier_stats}')
+ logger.debug(f'current: {stats.without_delta() if stats.is_delta else stats}')
+ return True
+ if not stats.is_delta:
+ logger.info(f'Comment required as comment mode is "{self._settings.comment_mode}" '
+ f'but no delta statistics to target branch available')
+ return True
+ if stats_has_changes:
+ logger.info(f'Comment required as comment mode is "{self._settings.comment_mode}" '
+ f'and changes {in_flavour}to target branch exist')
+ logger.debug(f'current: {stats}')
+ return True
+ return False
+
+ if self._settings.comment_mode == comment_mode_changes and \
+ do_changes_require_comment(earlier_stats.is_different if earlier_stats else None,
+ stats.is_delta and stats.has_changes):
+ return True
+
+ if self._settings.comment_mode == comment_mode_changes_failures and \
+ do_changes_require_comment(earlier_stats.is_different_in_failures if earlier_stats else None,
+ stats.is_delta and stats.has_failure_changes,
+ 'failures'):
+ return True
+
+ if self._settings.comment_mode in [comment_mode_changes_failures, comment_mode_changes_errors] and \
+ do_changes_require_comment(earlier_stats.is_different_in_errors if earlier_stats else None,
+ stats.is_delta and stats.has_error_changes,
+ 'errors'):
+ return True
+
+ # helper method to detect if stats require a comment
+ def do_stats_require_comment(earlier_stats_require: Optional[bool], stats_require: bool, flavour: str) -> bool:
+ if earlier_stats is not None and earlier_stats_require:
+ logger.info(f'Comment required as comment mode is {self._settings.comment_mode} '
+ f'and {flavour} existed in earlier comment')
+ return True
+ if stats_require:
+ logger.info(f'Comment required as comment mode is {self._settings.comment_mode} '
+ f'and {flavour} exist in current comment')
+ return True
+ return False
+
+ if self._settings.comment_mode == comment_mode_failures and \
+ do_stats_require_comment(earlier_stats.has_failures if earlier_stats else None,
+ stats.has_failures,
+ 'failures'):
+ return True
+
+ if self._settings.comment_mode in [comment_mode_failures, comment_mode_errors] and \
+ do_stats_require_comment(earlier_stats.has_errors if earlier_stats else None,
+ stats.has_errors,
+ 'errors'):
+ return True
+
+ return False
+
+ def get_latest_comment(self, pull: PullRequest) -> Optional[IssueComment]:
+ # get comments of this pull request
+ comments = self.get_pull_request_comments(pull, order_by_updated=True)
+
+ # get all comments that come from this action and are not hidden
+ comments = self.get_action_comments(comments)
+
+ # if there is no such comment, stop here
+ if len(comments) == 0:
+ return None
+
+ # fetch latest action comment
+ comment_id = comments[-1].get("databaseId")
+ return pull.get_issue_comment(comment_id)
+
+ def reuse_comment(self, comment: IssueComment, body: str):
+ if ':recycle:' not in body:
+ body = f'{body}\n:recycle: This comment has been updated with latest results.'
+
+ try:
+ comment.edit(body)
+ except Exception as e:
+ self._gha.warning(f'Failed to edit existing comment #{comment.id}')
+ logger.debug('editing existing comment failed', exc_info=e)
+
+ def get_base_commit_sha(self, pull_request: PullRequest) -> Optional[str]:
+ if self._settings.pull_request_build == pull_request_build_mode_merge:
+ if self._settings.event:
+ # for pull request events we take the other parent of the merge commit (base)
+ if self._settings.event_name == 'pull_request':
+ return get_json_path(self._settings.event, 'pull_request.base.sha')
+ # for workflow run events we should take the same as for pull request events,
+ # but we have no way to figure out the actual merge commit and its parents
+ # we do not take the base sha from pull_request as it is not immutable
+ if self._settings.event_name == 'workflow_run':
+ return None
+
+ try:
+ # we always fall back to the commit where the branch forked off the base ref
+ logger.debug(f'comparing {pull_request.base.ref} with {self._settings.commit}')
+ compare = self._repo.compare(pull_request.base.ref, self._settings.commit)
+ return compare.merge_base_commit.sha
+ except Exception:
+ logger.warning(f'could not find best common ancestor '
+ f'between base {pull_request.base.sha} '
+ f'and commit {self._settings.commit}')
+
+ return None
+
+ def get_pull_request_comments(self, pull: PullRequest, order_by_updated: bool) -> List[Mapping[str, Any]]:
+ order = ''
+ if order_by_updated:
+ order = ', orderBy: { direction: ASC, field: UPDATED_AT }'
+
+ query = dict(
+ query=r'query ListComments {'
+ r' repository(owner:"' + self._repo.owner.login + r'", name:"' + self._repo.name + r'") {'
+ r' pullRequest(number: ' + str(pull.number) + r') {'
+ f' comments(last: 100{order}) {{'
+ r' nodes {'
+ r' id, databaseId, author { login }, body, isMinimized'
+ r' }'
+ r' }'
+ r' }'
+ r' }'
+ r'}'
+ )
+
+ headers, data = self._req.requestJsonAndCheck(
+ "POST", self._settings.graphql_url, input=query
+ )
+
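+ # each returned node is a mapping shaped like (illustrative values):
+ # {'id': '...', 'databaseId': 123, 'author': {'login': 'github-actions'}, 'body': '## ...', 'isMinimized': False}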
+ return get_json_path(data, 'data.repository.pullRequest.comments.nodes')
+
+ def get_action_comments(self, comments: List[Mapping[str, Any]], is_minimized: Optional[bool] = False):
+ comment_body_start = f'## {self._settings.comment_title}\n'
+ comment_body_indicators = ['\nresults for commit ', '\nResults for commit ']
+ return [comment for comment in comments
+ if get_json_path(comment, 'author.login') == self._settings.actor
+ and (is_minimized is None or comment.get('isMinimized') == is_minimized)
+ and comment.get('body', '').startswith(comment_body_start)
+ and any(indicator in comment.get('body', '') for indicator in comment_body_indicators)]
diff --git a/python/publish/trx.py b/python/publish/trx.py
new file mode 100644
index 0000000..b9d86cd
--- /dev/null
+++ b/python/publish/trx.py
@@ -0,0 +1,31 @@
+import pathlib
+from typing import Iterable, Callable
+
+from lxml import etree
+
+from publish.junit import JUnitTree, ParsedJUnitFile, progress_safe_parse_xml_file, xml_has_root_element
+
+with (pathlib.Path(__file__).resolve().parent / 'xslt' / 'trx-to-junit.xslt').open('r', encoding='utf-8') as r:
+ transform_trx_to_junit = etree.XSLT(etree.parse(r), regexp=False, access_control=etree.XSLTAccessControl.DENY_ALL)
+
+
+def is_trx(path: str) -> bool:
+ return xml_has_root_element(path, ['TestRun'])
+
+
+def parse_trx_file(path: str, large_files: bool) -> JUnitTree:
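+ # transforms a TRX report into a JUnit XML tree via the XSLT loaded above;
+ # usage sketch (illustrative file name): junit_tree = parse_trx_file('results.trx', large_files=False)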
+ if large_files:
+ parser = etree.XMLParser(huge_tree=True)
+ trx = etree.parse(path, parser=parser)
+ else:
+ trx = etree.parse(path)
+ return transform_trx_to_junit(trx)
+
+
+def parse_trx_files(files: Iterable[str], large_files: bool,
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile] = lambda x: x) -> Iterable[ParsedJUnitFile]:
+ """Parses trx files."""
+ def parse(path: str) -> JUnitTree:
+ return parse_trx_file(path, large_files)
+
+ return progress_safe_parse_xml_file(files, parse, progress)
diff --git a/python/publish/unittestresults.py b/python/publish/unittestresults.py
new file mode 100644
index 0000000..b46070f
--- /dev/null
+++ b/python/publish/unittestresults.py
@@ -0,0 +1,517 @@
+import dataclasses
+from collections import defaultdict
+from copy import deepcopy
+from dataclasses import dataclass
+from typing import Optional, List, Mapping, Any, Union, Dict, Callable, Tuple, AbstractSet
+from xml.etree.ElementTree import ParseError as XmlParseError
+
+
+@dataclass(frozen=True)
+class UnitTestCase:
+ result_file: str
+ test_file: Optional[str]
+ line: Optional[int]
+ class_name: Optional[str]
+ test_name: Optional[str]
+ result: str
+ message: Optional[str]
+ content: Optional[str]
+ stdout: Optional[str]
+ stderr: Optional[str]
+ time: Optional[float]
+
+
+UnitTestCaseFileName = str
+UnitTestCaseClassName = str
+UnitTestCaseTestName = str
+UnitTestCaseResultKey = Tuple[Optional[UnitTestCaseFileName], UnitTestCaseClassName, UnitTestCaseTestName]
+UnitTestCaseState = str
+UnitTestCaseResults = Mapping[UnitTestCaseResultKey, Mapping[UnitTestCaseState, List[UnitTestCase]]]
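+# illustrative shape: results[(test_file, class_name, test_name)]['failure'] -> [UnitTestCase, ...]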
+
+
+def create_unit_test_case_results(indexed_cases: Optional[UnitTestCaseResults] = None) -> UnitTestCaseResults:
+ if indexed_cases:
+ return deepcopy(indexed_cases)
+ return defaultdict(lambda: defaultdict(list))
+
+
+@dataclass(frozen=True)
+class ParseError:
+ file: str
+ message: str
+ line: Optional[int] = None
+ column: Optional[int] = None
+ exception: Optional[BaseException] = None
+
+ @staticmethod
+ def from_exception(file: str, exception: BaseException):
+ if isinstance(exception, XmlParseError):
+ line, column = exception.position
+ msg = exception.msg
+ if msg.startswith('syntax error:') or \
+ msg.startswith('no element found:') or \
+ msg.startswith('unclosed token:') or \
+ msg.startswith('mismatched tag:'):
+ msg = f'File is not a valid XML file:\n{msg}'
+ elif msg.startswith('Invalid format.'):
+ msg = f'File is not a valid JUnit file:\n{msg}'
+ return ParseError(file=file, message=msg, line=line, column=column, exception=exception)
+ return ParseError(file=file, message=str(exception), exception=exception)
+
+ # exceptions can be arbitrary types and might not be serializable
+ def without_exception(self) -> 'ParseError':
+ return dataclasses.replace(self, exception=None)
+
+
+@dataclass(frozen=True)
+class ParsedUnitTestResults:
+ files: int
+ errors: List[ParseError]
+ suites: int
+ suite_tests: int
+ suite_skipped: int
+ suite_failures: int
+ suite_errors: int
+ suite_time: int
+ suite_details: List['UnitTestSuite']
+ cases: List[UnitTestCase]
+
+ def with_commit(self, commit: str) -> 'ParsedUnitTestResultsWithCommit':
+ return ParsedUnitTestResultsWithCommit(
+ self.files,
+ self.errors,
+ self.suites,
+ self.suite_tests,
+ self.suite_skipped,
+ self.suite_failures,
+ self.suite_errors,
+ self.suite_time,
+ self.suite_details,
+ self.cases,
+ commit
+ )
+
+
+@dataclass(frozen=True)
+class ParsedUnitTestResultsWithCommit(ParsedUnitTestResults):
+ commit: str
+
+ def with_cases(self,
+ cases_skipped: int,
+ cases_failures: int,
+ cases_errors: int,
+ cases_time: float,
+ case_results: UnitTestCaseResults,
+ tests: int,
+ tests_skipped: int,
+ tests_failures: int,
+ tests_errors: int) -> 'UnitTestResults':
+ return UnitTestResults(
+ files=self.files,
+ errors=self.errors,
+ suites=self.suites,
+ suite_tests=self.suite_tests,
+ suite_skipped=self.suite_skipped,
+ suite_failures=self.suite_failures,
+ suite_errors=self.suite_errors,
+ suite_time=self.suite_time,
+ suite_details=self.suite_details,
+ commit=self.commit,
+
+ cases=len(self.cases),
+ cases_skipped=cases_skipped,
+ cases_failures=cases_failures,
+ cases_errors=cases_errors,
+ cases_time=cases_time,
+ case_results=case_results,
+
+ tests=tests,
+ tests_skipped=tests_skipped,
+ tests_failures=tests_failures,
+ tests_errors=tests_errors
+ )
+
+ def without_cases(self):
+ # when there is no case information, we use the
+ # testsuite information for case and test level
+ return self.with_cases(
+ # test states and counts from cases
+ cases_skipped=self.suite_skipped,
+ cases_failures=self.suite_failures,
+ cases_errors=self.suite_errors,
+ cases_time=self.suite_time,
+ case_results=create_unit_test_case_results(),
+
+ tests=self.suite_tests,
+ tests_skipped=self.suite_skipped,
+ tests_failures=self.suite_failures,
+ tests_errors=self.suite_errors,
+ )
+
+
+@dataclass(frozen=True)
+class UnitTestSuite:
+ name: str
+ tests: int
+ skipped: int
+ failures: int
+ errors: int
+ stdout: Optional[str]
+ stderr: Optional[str]
+
+
+@dataclass(frozen=True)
+class UnitTestResults(ParsedUnitTestResultsWithCommit):
+ cases: int
+ cases_skipped: int
+ cases_failures: int
+ cases_errors: int
+ cases_time: float
+ case_results: UnitTestCaseResults
+
+ tests: int
+ tests_skipped: int
+ tests_failures: int
+ tests_errors: int
+
+
+@dataclass(frozen=True)
+class UnitTestRunResults:
+ files: int
+ errors: List[ParseError]
+ suites: int
+ duration: int
+
+ suite_details: Optional[List[UnitTestSuite]]
+
+ tests: int
+ tests_succ: int
+ tests_skip: int
+ tests_fail: int
+ tests_error: int
+
+ runs: int
+ runs_succ: int
+ runs_skip: int
+ runs_fail: int
+ runs_error: int
+
+ commit: str
+
+ @property
+ def is_delta(self) -> bool:
+ return False
+
+ @property
+ def has_failures(self):
+ return self.tests_fail > 0 or self.runs_fail > 0
+
+ @property
+ def has_errors(self):
+ return len(self.errors) > 0 or self.tests_error > 0 or self.runs_error > 0
+
+ @staticmethod
+ def _change_fields(results: 'UnitTestRunResults') -> List[int]:
+ return [results.files, results.suites,
+ results.tests, results.tests_succ, results.tests_skip, results.tests_fail, results.tests_error,
+ results.runs, results.runs_succ, results.runs_skip, results.runs_fail, results.runs_error]
+
+ @staticmethod
+ def _failure_fields(results: 'UnitTestRunResults') -> List[int]:
+ return [results.tests_fail, results.runs_fail]
+
+ @staticmethod
+ def _error_fields(results: 'UnitTestRunResults') -> List[int]:
+ return [results.tests_error, results.runs_error]
+
+ def is_different(self,
+ other: 'UnitTestRunResultsOrDeltaResults',
+ fields_func: Callable[['UnitTestRunResults'], List[int]] = _change_fields.__func__):
+ if other.is_delta:
+ other = other.without_delta()
+
+ return any([left != right for left, right in zip(fields_func(self), fields_func(other))])
+
+ def is_different_in_failures(self, other: 'UnitTestRunResultsOrDeltaResults'):
+ return self.is_different(other, self._failure_fields)
+
+ def is_different_in_errors(self, other: 'UnitTestRunResultsOrDeltaResults'):
+ return self.is_different(other, self._error_fields)
+
+ def with_errors(self, errors: List[ParseError]) -> 'UnitTestRunResults':
+ return UnitTestRunResults(
+ files=self.files,
+ errors=errors,
+ suites=self.suites,
+ duration=self.duration,
+
+ suite_details=self.suite_details,
+
+ tests=self.tests,
+ tests_succ=self.tests_succ,
+ tests_skip=self.tests_skip,
+ tests_fail=self.tests_fail,
+ tests_error=self.tests_error,
+
+ runs=self.runs,
+ runs_succ=self.runs_succ,
+ runs_skip=self.runs_skip,
+ runs_fail=self.runs_fail,
+ runs_error=self.runs_error,
+
+ commit=self.commit
+ )
+
+ # exceptions can be arbitrary types and might not be serializable
+ def without_exceptions(self) -> 'UnitTestRunResults':
+ return dataclasses.replace(self, errors=[error.without_exception() for error in self.errors])
+
+ def without_suite_details(self) -> 'UnitTestRunResults':
+ return dataclasses.replace(self, suite_details=None)
+
+ def to_dict(self) -> Dict[str, Any]:
+ # dict is usually used to serialize, but exceptions are likely not serializable, so we exclude them
+ # suite details might be arbitrarily large, we exclude those too
+ return dataclasses.asdict(self.without_exceptions().without_suite_details(),
+ # the dict_factory removes None values
+ dict_factory=lambda x: {k: v for (k, v) in x if v is not None})
+
+ @staticmethod
+ def from_dict(values: Mapping[str, Any]) -> 'UnitTestRunResults':
+ return UnitTestRunResults(
+ files=values.get('files'),
+ errors=values.get('errors', []),
+ suites=values.get('suites'),
+ duration=values.get('duration'),
+
+ suite_details=None,
+
+ tests=values.get('tests'),
+ tests_succ=values.get('tests_succ'),
+ tests_skip=values.get('tests_skip'),
+ tests_fail=values.get('tests_fail'),
+ tests_error=values.get('tests_error'),
+
+ runs=values.get('runs'),
+ runs_succ=values.get('runs_succ'),
+ runs_skip=values.get('runs_skip'),
+ runs_fail=values.get('runs_fail'),
+ runs_error=values.get('runs_error'),
+
+ commit=values.get('commit'),
+ )
+
+
+Numeric = Mapping[str, int]
+
+
+@dataclass(frozen=True)
+class UnitTestRunDeltaResults:
+ files: Numeric
+ errors: List[ParseError]
+ suites: Numeric
+ duration: Numeric
+
+ tests: Numeric
+ tests_succ: Numeric
+ tests_skip: Numeric
+ tests_fail: Numeric
+ tests_error: Numeric
+
+ runs: Numeric
+ runs_succ: Numeric
+ runs_skip: Numeric
+ runs_fail: Numeric
+ runs_error: Numeric
+
+ commit: str
+
+ reference_type: str
+ reference_commit: str
+
+ @property
+ def is_delta(self) -> bool:
+ return True
+
+ @staticmethod
+ def _has_changes(fields: List[Numeric]) -> bool:
+ return any([field.get('delta') for field in fields])
+
+ @property
+ def has_changes(self) -> bool:
+ return self._has_changes([self.files, self.suites,
+ self.tests, self.tests_succ, self.tests_skip, self.tests_fail, self.tests_error,
+ self.runs, self.runs_succ, self.runs_skip, self.runs_fail, self.runs_error])
+
+ @property
+ def has_failure_changes(self) -> bool:
+ return self._has_changes([self.tests_fail, self.runs_fail])
+
+ @property
+ def has_error_changes(self) -> bool:
+ return self._has_changes([self.tests_error, self.runs_error])
+
+ @property
+ def has_failures(self):
+ return self.tests_fail.get('number') > 0 or self.runs_fail.get('number') > 0
+
+ @property
+ def has_errors(self):
+ return len(self.errors) > 0 or self.tests_error.get('number') > 0 or self.runs_error.get('number') > 0
+
+ def to_dict(self) -> Dict[str, Any]:
+ # dict is usually used to serialize, but exceptions are likely not serializable, so we exclude them
+ return dataclasses.asdict(self.without_exceptions())
+
+ def without_delta(self) -> UnitTestRunResults:
+ def v(value: Numeric) -> int:
+ return value['number']
+
+ def d(value: Numeric) -> int:
+ return value['duration']
+
+ return UnitTestRunResults(files=v(self.files), errors=self.errors, suites=v(self.suites), duration=d(self.duration), suite_details=None,
+ tests=v(self.tests), tests_succ=v(self.tests_succ), tests_skip=v(self.tests_skip), tests_fail=v(self.tests_fail), tests_error=v(self.tests_error),
+ runs=v(self.runs), runs_succ=v(self.runs_succ), runs_skip=v(self.runs_skip), runs_fail=v(self.runs_fail), runs_error=v(self.runs_error),
+ commit=self.commit)
+
+ def without_exceptions(self) -> 'UnitTestRunDeltaResults':
+ return dataclasses.replace(self, errors=[error.without_exception() for error in self.errors])
+
+
+UnitTestRunResultsOrDeltaResults = Union[UnitTestRunResults, UnitTestRunDeltaResults]
+
+
+def aggregate_states(states: AbstractSet[str]) -> str:
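+ # the most severe state wins, e.g. (illustrative): {'success', 'failure'} -> 'failure'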
+ return 'error' if 'error' in states else \
+ 'failure' if 'failure' in states else \
+ 'success' if 'success' in states else \
+ 'skipped'
+
+
+def get_test_results(parsed_results: ParsedUnitTestResultsWithCommit,
+ dedup_classes_by_file_name: bool) -> UnitTestResults:
+ """
+ Computes case and test statistics and returns them as a UnitTestResults instance.
+ With dedup_classes_by_file_name=True, considers file name to identify classes,
+ not just their class name.
+
+ :param parsed_results: parsed unit test results
+ :param dedup_classes_by_file_name: when True, identify test classes by file name in addition to class name
+ :return: unit test result statistics
+ """
+ cases = parsed_results.cases
+
+ if len(cases) == 0:
+ return parsed_results.without_cases()
+
+ cases_skipped = [case for case in cases if case.result in ['skipped', 'disabled']]
+ cases_failures = [case for case in cases if case.result == 'failure']
+ cases_errors = [case for case in cases if case.result == 'error']
+ cases_time = sum([case.time or 0 for case in cases])
+
+ # index cases by tests and state
+ cases_results = create_unit_test_case_results()
+ for case in cases:
+ # index by test file name (when de-duplicating by file name), class name and test name
+ test = (case.test_file if dedup_classes_by_file_name else None, case.class_name, case.test_name)
+
+ # second index by state
+ state = case.result if case.result != 'disabled' else 'skipped'
+
+ # collect cases of test and state
+ cases_results[test][state].append(case)
+
+ test_results = dict()
+ for test, states in cases_results.items():
+ test_results[test] = aggregate_states(states.keys())
+
+ tests = len(test_results)
+ tests_skipped = len([test for test, state in test_results.items() if state in ['skipped', 'disabled']])
+ tests_failures = len([test for test, state in test_results.items() if state == 'failure'])
+ tests_errors = len([test for test, state in test_results.items() if state == 'error'])
+
+ return parsed_results.with_cases(
+ # test states and counts from cases
+ cases_skipped=len(cases_skipped),
+ cases_failures=len(cases_failures),
+ cases_errors=len(cases_errors),
+ cases_time=cases_time,
+ case_results=cases_results,
+
+ tests=tests,
+ # distinct test states by case name
+ tests_skipped=tests_skipped,
+ tests_failures=tests_failures,
+ tests_errors=tests_errors,
+ )
+
+
+def get_stats(test_results: UnitTestResults) -> UnitTestRunResults:
+ """Provides stats for the given test results."""
+ tests_succ = test_results.tests - test_results.tests_skipped - test_results.tests_failures - test_results.tests_errors
+ runs_succ = test_results.suite_tests - test_results.suite_skipped - test_results.suite_failures - test_results.suite_errors
+
+ return UnitTestRunResults(
+ files=test_results.files,
+ errors=test_results.errors,
+ suites=test_results.suites,
+ duration=test_results.suite_time,
+
+ suite_details=test_results.suite_details,
+
+ tests=test_results.tests,
+ tests_succ=tests_succ,
+ tests_skip=test_results.tests_skipped,
+ tests_fail=test_results.tests_failures,
+ tests_error=test_results.tests_errors,
+
+ runs=test_results.suite_tests,
+ runs_succ=runs_succ,
+ runs_skip=test_results.suite_skipped,
+ runs_fail=test_results.suite_failures,
+ runs_error=test_results.suite_errors,
+
+ commit=test_results.commit
+ )
+
+
+def get_diff_value(value: int, reference: int, field: str = 'number') -> Numeric:
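+ # illustrative: get_diff_value(5, 3) -> {'number': 5, 'delta': 2}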
+ if field == 'duration':
+ val = dict(duration=value)
+ elif field == 'number':
+ val = dict(number=value)
+ else:
+ raise ValueError(f'unsupported field: {field}')
+
+ val['delta'] = value - reference
+ return val
+
+
+def get_stats_delta(stats: UnitTestRunResults,
+ reference_stats: UnitTestRunResults,
+ reference_type: str) -> UnitTestRunDeltaResults:
+ """Given two stats provides a stats with deltas."""
+ return UnitTestRunDeltaResults(
+ files=get_diff_value(stats.files, reference_stats.files),
+ errors=stats.errors,
+ suites=get_diff_value(stats.suites, reference_stats.suites),
+ duration=get_diff_value(stats.duration, reference_stats.duration, 'duration'),
+
+ tests=get_diff_value(stats.tests, reference_stats.tests),
+ tests_succ=get_diff_value(stats.tests_succ, reference_stats.tests_succ),
+ tests_skip=get_diff_value(stats.tests_skip, reference_stats.tests_skip),
+ tests_fail=get_diff_value(stats.tests_fail, reference_stats.tests_fail),
+ tests_error=get_diff_value(stats.tests_error, reference_stats.tests_error),
+
+ runs=get_diff_value(stats.runs, reference_stats.runs),
+ runs_succ=get_diff_value(stats.runs_succ, reference_stats.runs_succ),
+ runs_skip=get_diff_value(stats.runs_skip, reference_stats.runs_skip),
+ runs_fail=get_diff_value(stats.runs_fail, reference_stats.runs_fail),
+ runs_error=get_diff_value(stats.runs_error, reference_stats.runs_error),
+
+ commit=stats.commit,
+
+ reference_type=reference_type,
+ reference_commit=reference_stats.commit
+ )
diff --git a/python/publish/xslt/nunit-to-junit.xslt b/python/publish/xslt/nunit-to-junit.xslt
new file mode 100644
index 0000000..11b9055
--- /dev/null
+++ b/python/publish/xslt/nunit-to-junit.xslt
@@ -0,0 +1,88 @@
+<!-- nunit-to-junit.xslt: XSLT stylesheet transforming NUnit XML test reports
+     into JUnit format; the stylesheet markup is not recoverable here, only the
+     literal text fragments "MESSAGE:" and "STACK TRACE:" used in failure output
+     remain -->
diff --git a/python/publish/xslt/nunit3-to-junit.xslt b/python/publish/xslt/nunit3-to-junit.xslt
new file mode 100644
index 0000000..00d0551
--- /dev/null
+++ b/python/publish/xslt/nunit3-to-junit.xslt
@@ -0,0 +1,139 @@
+<!-- nunit3-to-junit.xslt: XSLT stylesheet transforming NUnit3 XML test reports
+     into JUnit format; the stylesheet markup is not recoverable here -->
diff --git a/python/publish/xslt/trx-to-junit.xslt b/python/publish/xslt/trx-to-junit.xslt
new file mode 100644
index 0000000..3cb1a7d
--- /dev/null
+++ b/python/publish/xslt/trx-to-junit.xslt
@@ -0,0 +1,268 @@
+<!-- trx-to-junit.xslt: XSLT stylesheet transforming Visual Studio TRX test
+     reports into JUnit format (loaded by python/publish/trx.py); the stylesheet
+     markup is not recoverable here -->
diff --git a/python/publish/xslt/xunit-to-junit.xslt b/python/publish/xslt/xunit-to-junit.xslt
new file mode 100644
index 0000000..6ee68d6
--- /dev/null
+++ b/python/publish/xslt/xunit-to-junit.xslt
@@ -0,0 +1,63 @@
+<!-- xunit-to-junit.xslt: XSLT stylesheet transforming XUnit XML test reports
+     into JUnit format (loaded by python/publish/xunit.py); the stylesheet
+     markup is not recoverable here -->
diff --git a/python/publish/xunit.py b/python/publish/xunit.py
new file mode 100644
index 0000000..ec7c1a8
--- /dev/null
+++ b/python/publish/xunit.py
@@ -0,0 +1,31 @@
+import pathlib
+from typing import Iterable, Callable
+
+from lxml import etree
+
+from publish.junit import JUnitTree, ParsedJUnitFile, progress_safe_parse_xml_file, xml_has_root_element
+
+with (pathlib.Path(__file__).resolve().parent / 'xslt' / 'xunit-to-junit.xslt').open('r', encoding='utf-8') as r:
+ transform_xunit_to_junit = etree.XSLT(etree.parse(r), regexp=False, access_control=etree.XSLTAccessControl.DENY_ALL)
+
+
+def is_xunit(path: str) -> bool:
+ return xml_has_root_element(path, ['assemblies', 'assembly'])
+
+
+def parse_xunit_file(path: str, large_files: bool) -> JUnitTree:
+ if large_files:
+ parser = etree.XMLParser(huge_tree=True)
+ xunit = etree.parse(path, parser=parser)
+ else:
+ xunit = etree.parse(path)
+ return transform_xunit_to_junit(xunit)
+
+
+def parse_xunit_files(files: Iterable[str], large_files: bool,
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile] = lambda x: x) -> Iterable[ParsedJUnitFile]:
+ """Parses xunit files."""
+ def parse(path: str) -> JUnitTree:
+ return parse_xunit_file(path, large_files)
+
+ return progress_safe_parse_xml_file(files, parse, progress)
diff --git a/python/publish_test_results.py b/python/publish_test_results.py
new file mode 100644
index 0000000..0fd8da0
--- /dev/null
+++ b/python/publish_test_results.py
@@ -0,0 +1,540 @@
+import json
+import logging
+import os
+import re
+import sys
+from glob import glob
+from pathlib import Path
+from typing import List, Optional, Union, Mapping, Tuple, Any, Iterable, Callable
+
+import github
+import humanize
+import psutil
+from github.GithubRetry import DEFAULT_SECONDARY_RATE_WAIT
+
+import publish.github_action
+from publish import __version__, available_annotations, default_annotations, none_annotations, \
+ report_suite_out_log, report_suite_err_log, report_suite_logs, default_report_suite_logs, available_report_suite_logs, \
+ pull_request_build_modes, fail_on_modes, fail_on_mode_errors, fail_on_mode_failures, \
+ comment_mode_always, comment_modes, punctuation_space
+from publish.github_action import GithubAction
+from publish.junit import JUnitTree, parse_junit_xml_files, parse_junit_xml_file, process_junit_xml_elems, \
+ ParsedJUnitFile, progress_safe_parse_xml_file, is_junit
+from publish.progress import progress_logger
+from publish.publisher import Publisher, Settings
+from publish.unittestresults import get_test_results, get_stats, ParsedUnitTestResults, ParsedUnitTestResultsWithCommit, \
+ ParseError
+
+logger = logging.getLogger('publish')
+
+
+def get_conclusion(parsed: ParsedUnitTestResults, fail_on_failures, fail_on_errors) -> str:
+ if parsed.files == 0:
+ return 'neutral'
+ if fail_on_errors and len(parsed.errors) > 0:
+ return 'failure'
+ if (fail_on_failures and parsed.suite_failures > 0) or (fail_on_errors and parsed.suite_errors > 0):
+ return 'failure'
+ return 'success'
+
+
+def get_github(auth: github.Auth,
+ url: str,
+ retries: int,
+ backoff_factor: float,
+ seconds_between_requests: Optional[float],
+ seconds_between_writes: Optional[float],
+ secondary_rate_wait: float) -> github.Github:
+ retry = github.GithubRetry(total=retries,
+ backoff_factor=backoff_factor,
+ secondary_rate_wait=secondary_rate_wait)
+ return github.Github(auth=auth,
+ base_url=url,
+ per_page=100,
+ retry=retry,
+ seconds_between_requests=seconds_between_requests,
+ seconds_between_writes=seconds_between_writes)
+
+
+def get_files(multiline_files_globs: str) -> Tuple[List[str], bool]:
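+ # globs starting with '!' exclude matched files; illustrative pattern:
+ # 'artifacts/**/*.xml\n!artifacts/internal/*.xml' includes all XML files under
+ # artifacts/ except those under artifacts/internal/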
+ multiline_files_globs = re.split('\r?\n\r?', multiline_files_globs)
+ included = {str(file)
+ for files_glob in multiline_files_globs
+ if not files_glob.startswith('!')
+ for file in glob(files_glob, recursive=True)}
+ excluded = {str(file)
+ for files_glob in multiline_files_globs
+ if files_glob.startswith('!')
+ for file in glob(files_glob[1:], recursive=True)}
+ has_absolute = any({Path(pattern).is_absolute()
+ for files_glob in multiline_files_globs
+ for pattern in [files_glob[1:] if files_glob.startswith('!') else files_glob]})
+ return list(included - excluded), has_absolute
+
+
+def prettify_glob_pattern(pattern: Optional[str]) -> Optional[str]:
+ if pattern is not None:
+ return re.sub('\r?\n\r?', ', ', pattern.strip())
+
+
+def expand_glob(pattern: Optional[str], file_format: Optional[str], gha: GithubAction) -> List[str]:
+ if not pattern:
+ return []
+
+ files, has_absolute_patterns = get_files(pattern)
+ file_format = f' {file_format}' if file_format else ''
+
+ prettified_pattern = prettify_glob_pattern(pattern)
+ if len(files) == 0:
+ gha.warning(f'Could not find any{file_format} files for {prettified_pattern}')
+ if has_absolute_patterns:
+ gha.warning('Your file pattern contains absolute paths, please read the notes on absolute paths:')
+ gha.warning(f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#running-with-absolute-paths')
+ else:
+ logger.info(f'Reading{file_format} files {prettified_pattern} ({get_number_of_files(files)}, {get_files_size(files)})')
+ logger.debug(f'reading{file_format} files {list(files)}')
+
+ return files
+
+
+def get_files_size(files: List[str]) -> str:
+ try:
+ size = sum([os.path.getsize(file) for file in files])
+ return humanize.naturalsize(size, binary=True)
+ except BaseException as e:
+ logger.warning(f'failed to obtain file size of {len(files)} files', exc_info=e)
+ return 'unknown size'
+
+
+def get_number_of_files(files: List[str], label: str = 'file') -> str:
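+ # illustrative: 1,234 files render as '1 234 files' (comma replaced by a punctuation space)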
+ number_of_files = '{number:,} {label}{s}'.format(
+ number=len(files),
+ label=label,
+ s='s' if len(files) > 1 else ''
+ ).replace(',', punctuation_space)
+ return number_of_files
+
+
+def parse_files_as_xml(files: Iterable[str], large_files: bool, drop_testcases: bool,
+ progress: Callable[[ParsedJUnitFile], ParsedJUnitFile] = lambda x: x) -> Iterable[ParsedJUnitFile]:
+ junit_files = []
+ nunit_files = []
+ xunit_files = []
+ trx_files = []
+ dart_json_files = []
+ mocha_json_files = []
+ unknown_files = []
+
+ def parse(path: str) -> JUnitTree:
+ if is_junit(path):
+ junit_files.append(path)
+ return parse_junit_xml_file(path, large_files, drop_testcases)
+
+ from publish.nunit import is_nunit, parse_nunit_file
+ if is_nunit(path):
+ nunit_files.append(path)
+ return parse_nunit_file(path, large_files)
+
+ from publish.xunit import is_xunit, parse_xunit_file
+ if is_xunit(path):
+ xunit_files.append(path)
+ return parse_xunit_file(path, large_files)
+
+ from publish.trx import is_trx, parse_trx_file
+ if is_trx(path):
+ trx_files.append(path)
+ return parse_trx_file(path, large_files)
+
+ from publish.dart import is_dart_json, parse_dart_json_file
+ if is_dart_json(path):
+ dart_json_files.append(path)
+ return parse_dart_json_file(path)
+
+ from publish.mocha import is_mocha_json, parse_mocha_json_file
+ if is_mocha_json(path):
+ mocha_json_files.append(path)
+ return parse_mocha_json_file(path)
+
+ unknown_files.append(path)
+ raise RuntimeError(f'Unsupported file format: {path}')
+
+ try:
+ return progress_safe_parse_xml_file(files, parse, progress)
+ finally:
+ for flavour, files in [
+ ('JUnit XML', junit_files),
+ ('NUnit XML', nunit_files),
+ ('XUnit XML', xunit_files),
+ ('TRX', trx_files),
+ ('Dart JSON', dart_json_files),
+ ('Mocha JSON', mocha_json_files),
+ ('unsupported', unknown_files)
+ ]:
+ if files:
+ logger.info(f'Detected {get_number_of_files(files, f"{flavour} file")} ({get_files_size(files)})')
+ if flavour == 'unsupported':
+ for file in files:
+ logger.info(f'Unsupported file: {file}')
+ else:
+ logger.debug(f'detected {flavour} files {list(files)}')
+
+
+def parse_files(settings: Settings, gha: GithubAction) -> ParsedUnitTestResultsWithCommit:
+ # expand file globs
+ files = expand_glob(settings.files_glob, None, gha)
+ junit_files = expand_glob(settings.junit_files_glob, 'JUnit XML', gha)
+ nunit_files = expand_glob(settings.nunit_files_glob, 'NUnit XML', gha)
+ xunit_files = expand_glob(settings.xunit_files_glob, 'XUnit XML', gha)
+ trx_files = expand_glob(settings.trx_files_glob, 'TRX', gha)
+
+ elems = []
+
+ # parse files, log the progress
+ with progress_logger(items=len(files + junit_files + nunit_files + xunit_files + trx_files),
+ interval_seconds=10,
+ progress_template='Read {progress} files in {time}',
+ finish_template='Finished reading {observations} files in {duration}',
+ progress_item_type=Tuple[str, Any],
+ logger=logger) as progress:
+ if files:
+ elems.extend(parse_files_as_xml(files, settings.large_files, settings.ignore_runs, progress))
+ if junit_files:
+ elems.extend(parse_junit_xml_files(junit_files, settings.large_files, settings.ignore_runs, progress))
+ if xunit_files:
+ from publish.xunit import parse_xunit_files
+ elems.extend(parse_xunit_files(xunit_files, settings.large_files, progress))
+ if nunit_files:
+ from publish.nunit import parse_nunit_files
+ elems.extend(parse_nunit_files(nunit_files, settings.large_files, progress))
+ if trx_files:
+ from publish.trx import parse_trx_files
+ elems.extend(parse_trx_files(trx_files, settings.large_files, progress))
+
+ # get the test results
+ return process_junit_xml_elems(
+ elems,
+ time_factor=settings.time_factor,
+ test_file_prefix=settings.test_file_prefix,
+ add_suite_details=settings.report_suite_out_logs or settings.report_suite_err_logs or settings.json_suite_details
+ ).with_commit(settings.commit)
+
+
+def log_parse_errors(errors: List[ParseError], gha: GithubAction):
+ for error in errors:
+ gha.error(message=f'Error processing result file: {error.message}', file=error.file, line=error.line, column=error.column, exception=error.exception)
+
+
+def action_fail_required(conclusion: str, action_fail: bool, action_fail_on_inconclusive: bool) -> bool:
+ return (action_fail and conclusion == 'failure') or \
+ (action_fail_on_inconclusive and conclusion == 'inconclusive')
+
+
+def main(settings: Settings, gha: GithubAction) -> None:
+ if settings.is_fork and not settings.job_summary:
+ gha.warning(f'This action is running on a pull_request event for a fork repository. '
+ f'The only useful thing it can do in this situation is creating a job summary, which is disabled in settings. '
+ f'To fully run the action on fork repository pull requests, see '
+ f'https://github.com/step-security/publish-unit-test-result-action/blob/{__version__}/README.md#support-fork-repositories-and-dependabot-branches')
+ return
+
+ # log the available RAM to help spot OOM issues:
+ avail_mem = humanize.naturalsize(psutil.virtual_memory().available, binary=True)
+ logger.info(f'Available memory to read files: {avail_mem}')
+
+ # get the unit test results
+ parsed = parse_files(settings, gha)
+ log_parse_errors(parsed.errors, gha)
+
+ # process the parsed results
+ results = get_test_results(parsed, settings.dedup_classes_by_file_name)
+
+ # turn them into stats
+ stats = get_stats(results)
+
+ # derive check run conclusion from files
+ conclusion = get_conclusion(parsed, fail_on_failures=settings.fail_on_failures, fail_on_errors=settings.fail_on_errors)
+
+ # publish the delta stats
+ backoff_factor = max(settings.seconds_between_github_reads, settings.seconds_between_github_writes)
+ gh = get_github(auth=github.Auth.Token(settings.token),
+ url=settings.api_url,
+ retries=settings.api_retries,
+ backoff_factor=backoff_factor,
+ seconds_between_requests=settings.seconds_between_github_reads,
+ seconds_between_writes=settings.seconds_between_github_writes,
+ secondary_rate_wait=settings.secondary_rate_limit_wait_seconds)
+ Publisher(settings, gh, gha).publish(stats, results.case_results, conclusion)
+
+ if action_fail_required(conclusion, settings.action_fail, settings.action_fail_on_inconclusive):
+ gha.error(f'This action finished successfully, but test results have status {conclusion}.')
+ sys.exit(1)
+
+
+def get_commit_sha(event: dict, event_name: str, options: dict):
+ logger.debug(f"action triggered by '{event_name}' event")
+
+ # https://developer.github.com/webhooks/event-payloads/
+ if event_name.startswith('pull_request'):
+ return event.get('pull_request', {}).get('head', {}).get('sha')
+
+ # https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
+ return options.get('GITHUB_SHA')
+
+
+def get_annotations_config(options: dict, event: Optional[dict]) -> List[str]:
+ annotations = get_var('CHECK_RUN_ANNOTATIONS', options)
+ annotations = [annotation.strip() for annotation in annotations.split(',')] \
+ if annotations else default_annotations
+ default_branch = event.get('repository', {}).get('default_branch') if event else None
+ annotations_branch = get_var('CHECK_RUN_ANNOTATIONS_BRANCH', options) or default_branch or 'main, master'
+ annotations_branches = {f'refs/heads/{branch.strip()}' for branch in annotations_branch.split(',')}
+ branch = get_var('GITHUB_REF', options)
+
+ if annotations and branch and annotations_branches and \
+ 'refs/heads/*' not in annotations_branches and \
+ branch not in annotations_branches:
+ annotations = []
+
+ return annotations
+
+
+def get_var(name: str, options: dict) -> Optional[str]:
+ """
+ Returns the value from the given dict with key 'INPUT_$key',
+ or if this does not exist, key 'key'.
+ """
+ # the last 'or None' turns empty strings into None
+ return options.get(f'INPUT_{name}') or options.get(name) or None
+
+
+def get_bool_var(name: str, options: dict, default: bool) -> bool:
+ """
+ Same as get_var(), but checks if the value is a valid boolean.
+ Prints a warning and uses the default if the string value is not a boolean value.
+ If the value is unset, returns the default.
+ """
+ val = get_var(name, options)
+ if not val:
+ return default
+
+ val = val.lower()
+ if val == 'true':
+ return True
+ elif val == 'false':
+ return False
+ else:
+ raise RuntimeError(f'Option {name.lower()} has to be boolean, so either "true" or "false": {val}')
+
+
+def check_var(var: Union[Optional[str], List[str]],
+ name: str,
+ label: str,
+ allowed_values: Optional[List[str]] = None,
+ deprecated_values: Optional[List[str]] = None) -> None:
+ if var is None:
+ raise RuntimeError(f'{label} must be provided via action input or environment variable {name}')
+
+ if allowed_values:
+ if isinstance(var, str):
+ if var not in allowed_values + (deprecated_values or []):
+ raise RuntimeError(f"Value '{var}' is not supported for variable {name}, "
+ f"expected: {', '.join(allowed_values)}")
+ if isinstance(var, list):
+ if any([v not in allowed_values + (deprecated_values or []) for v in var]):
+ raise RuntimeError(f"Some values in '{', '.join(var)}' "
+ f"are not supported for variable {name}, "
+ f"allowed: {', '.join(allowed_values)}")
+
+
+def check_var_condition(condition: bool, message: str) -> None:
+ if not condition:
+ raise RuntimeError(message)
+
+
+def deprecate_var(val: Optional[str], deprecated_var: str, replacement_var: str, gha: Optional[GithubAction]):
+ if val is not None:
+ message = f'Option {deprecated_var.lower()} is deprecated! {replacement_var}'
+
+ if gha is None:
+ logger.warning(message)
+ else:
+ gha.warning(message)
+
+
+def available_values(values: List[str]) -> str:
+ values = [f'"{val}"' for val in values]
+ return f"{', '.join(values[:-1])} or {values[-1]}"
+
+
+def deprecate_val(val: Optional[str], var: str, replacement_vals: Mapping[str, str], gha: Optional[GithubAction]):
+ if val in replacement_vals:
+ message = f'Value "{val}" for option {var.lower()} is deprecated!'
+ replacement = replacement_vals[val]
+ if replacement:
+ message = f'{message} Instead, use value "{replacement}".'
+
+ if gha is None:
+ logger.warning(message)
+ else:
+ gha.warning(message)
+
+
+def is_float(text: str) -> bool:
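+ # illustrative: matches '1', '2.5', '-0.25' and '+.5', but rejects '1e3' and ''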
+ return re.match(r'^[+-]?(([0-9]*\.[0-9]+)|([0-9]+(\.[0-9]?)?))$', text) is not None
+
+
+def get_settings(options: dict, gha: GithubAction) -> Settings:
+ event_file = get_var('EVENT_FILE', options)
+ event = event_file or get_var('GITHUB_EVENT_PATH', options)
+ event_name = get_var('EVENT_NAME', options) or get_var('GITHUB_EVENT_NAME', options)
+ check_var(event, 'GITHUB_EVENT_PATH', 'GitHub event file path')
+ check_var(event_name, 'GITHUB_EVENT_NAME', 'GitHub event name')
+ with open(event, 'rt', encoding='utf-8') as f:
+ event = json.load(f)
+
+ repo = get_var('GITHUB_REPOSITORY', options)
+ job_summary = get_bool_var('JOB_SUMMARY', options, default=True)
+ comment_mode = get_var('COMMENT_MODE', options) or comment_mode_always
+
+ # we cannot create a check run or pull request comment when running on pull_request event from a fork
+ # when event_file is given we assume proper setup as in README.md#support-fork-repositories-and-dependabot-branches
+ is_fork = event_file is None and \
+ event_name == 'pull_request' and \
+ event.get('pull_request', {}).get('head', {}).get('repo', {}).get('full_name') != repo
+
+ api_url = options.get('GITHUB_API_URL') or github.Consts.DEFAULT_BASE_URL
+ graphql_url = options.get('GITHUB_GRAPHQL_URL') or f'{github.Consts.DEFAULT_BASE_URL}/graphql'
+ test_changes_limit = get_var('TEST_CHANGES_LIMIT', options) or '10'
+ check_var_condition(test_changes_limit.isnumeric(), f'TEST_CHANGES_LIMIT must be a positive integer or 0: {test_changes_limit}')
+
+ default_files_glob = None
+ flavours = ['JUNIT', 'NUNIT', 'XUNIT', 'TRX']
+ if not any(get_var(option, options) for option in ['FILES'] + [f'{flavour}_FILES' for flavour in flavours]):
+ default_files_glob = '*.xml'
+ gha.warning(f'At least one of the FILES, JUNIT_FILES, NUNIT_FILES, XUNIT_FILES, or TRX_FILES options has to be set! '
+ f'Falling back to deprecated default "{default_files_glob}"')
+
+ time_unit = get_var('TIME_UNIT', options) or 'seconds'
+ time_factors = {'seconds': 1.0, 'milliseconds': 0.001}
+ time_factor = time_factors.get(time_unit.lower())
+ check_var_condition(time_factor is not None, f'TIME_UNIT {time_unit} is not supported. '
+ f'It is optional, but when given must be one of these values: '
+ f'{", ".join(time_factors.keys())}')
+
+ check_name = get_var('CHECK_NAME', options) or 'Test Results'
+ annotations = get_annotations_config(options, event)
+ suite_logs_mode = get_var('REPORT_SUITE_LOGS', options) or default_report_suite_logs
+ ignore_runs = get_bool_var('IGNORE_RUNS', options, default=False)
+
+ fail_on = get_var('FAIL_ON', options) or 'test failures'
+ check_var(fail_on, 'FAIL_ON', 'Check fail mode', fail_on_modes)
+ # failing on test failures implies failing on errors, similar to log level escalation
+ fail_on_failures = fail_on == fail_on_mode_failures
+ fail_on_errors = fail_on == fail_on_mode_errors or fail_on_failures
+
+    retries = get_var('GITHUB_RETRIES', options) or '10'
+    seconds_between_github_reads = get_var('SECONDS_BETWEEN_GITHUB_READS', options) or '1'
+    seconds_between_github_writes = get_var('SECONDS_BETWEEN_GITHUB_WRITES', options) or '2'
+    secondary_rate_limit_wait_seconds = get_var('SECONDARY_RATE_LIMIT_WAIT_SECONDS', options) or str(DEFAULT_SECONDARY_RATE_WAIT)
+    check_var_condition(retries.isnumeric(), f'GITHUB_RETRIES must be a positive integer or 0: {retries}')
+    check_var_condition(is_float(seconds_between_github_reads), f'SECONDS_BETWEEN_GITHUB_READS must be an integer or float number: {seconds_between_github_reads}')
+    check_var_condition(is_float(seconds_between_github_writes), f'SECONDS_BETWEEN_GITHUB_WRITES must be an integer or float number: {seconds_between_github_writes}')
+    check_var_condition(is_float(secondary_rate_limit_wait_seconds), f'SECONDARY_RATE_LIMIT_WAIT_SECONDS must be an integer or float number: {secondary_rate_limit_wait_seconds}')
+
+    settings = Settings(
+        token=get_var('GITHUB_TOKEN', options),
+        actor=get_var('GITHUB_TOKEN_ACTOR', options) or 'github-actions',
+        api_url=api_url,
+        graphql_url=graphql_url,
+        api_retries=int(retries),
+        event=event,
+        event_file=event_file,
+        event_name=event_name,
+        is_fork=is_fork,
+        repo=repo,
+        commit=get_var('COMMIT', options) or get_commit_sha(event, event_name, options),
+        json_file=get_var('JSON_FILE', options),
+        json_thousands_separator=get_var('JSON_THOUSANDS_SEPARATOR', options) or punctuation_space,
+        json_suite_details=get_bool_var('JSON_SUITE_DETAILS', options, default=False),
+        json_test_case_results=get_bool_var('JSON_TEST_CASE_RESULTS', options, default=False),
+        fail_on_errors=fail_on_errors,
+        fail_on_failures=fail_on_failures,
+        action_fail=get_bool_var('ACTION_FAIL', options, default=False),
+        action_fail_on_inconclusive=get_bool_var('ACTION_FAIL_ON_INCONCLUSIVE', options, default=False),
+        files_glob=get_var('FILES', options) or default_files_glob,
+        junit_files_glob=get_var('JUNIT_FILES', options),
+        nunit_files_glob=get_var('NUNIT_FILES', options),
+        xunit_files_glob=get_var('XUNIT_FILES', options),
+        trx_files_glob=get_var('TRX_FILES', options),
+        time_factor=time_factor,
+        test_file_prefix=get_var('TEST_FILE_PREFIX', options) or None,
+        check_name=check_name,
+        comment_title=get_var('COMMENT_TITLE', options) or check_name,
+        comment_mode=comment_mode,
+        job_summary=job_summary,
+        compare_earlier=get_bool_var('COMPARE_TO_EARLIER_COMMIT', options, default=True),
+        pull_request_build=get_var('PULL_REQUEST_BUILD', options) or 'merge',
+        test_changes_limit=int(test_changes_limit),
+        report_individual_runs=get_bool_var('REPORT_INDIVIDUAL_RUNS', options, default=False),
+        report_suite_out_logs=suite_logs_mode in {report_suite_logs, report_suite_out_log},
+        report_suite_err_logs=suite_logs_mode in {report_suite_logs, report_suite_err_log},
+        dedup_classes_by_file_name=get_bool_var('DEDUPLICATE_CLASSES_BY_FILE_NAME', options, default=False),
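+        # LARGE_FILES defaults to true when IGNORE_RUNS is enabled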
+        large_files=get_bool_var('LARGE_FILES', options, default=ignore_runs),
+        ignore_runs=ignore_runs,
+        check_run_annotation=annotations,
+        seconds_between_github_reads=float(seconds_between_github_reads),
+        seconds_between_github_writes=float(seconds_between_github_writes),
+        secondary_rate_limit_wait_seconds=float(secondary_rate_limit_wait_seconds),
+        search_pull_requests=get_bool_var('SEARCH_PULL_REQUESTS', options, default=False),
+    )
+
+    check_var(settings.token, 'GITHUB_TOKEN', 'GitHub token')
+    check_var(settings.repo, 'GITHUB_REPOSITORY', 'GitHub repository')
+    check_var(settings.commit, 'COMMIT, GITHUB_SHA or event file', 'Commit SHA')
+    check_var_condition(
+        settings.test_file_prefix is None or any([settings.test_file_prefix.startswith(sign) for sign in ['-', '+']]),
+        f"TEST_FILE_PREFIX is optional, but when given, it must start with '-' or '+': {settings.test_file_prefix}"
+    )
+    check_var(settings.comment_mode, 'COMMENT_MODE', 'Comment mode', comment_modes)
+    check_var(settings.pull_request_build, 'PULL_REQUEST_BUILD', 'Pull Request build', pull_request_build_modes)
+    check_var(suite_logs_mode, 'REPORT_SUITE_LOGS', 'Report suite logs mode', available_report_suite_logs)
+    check_var(settings.check_run_annotation, 'CHECK_RUN_ANNOTATIONS', 'Check run annotations', available_annotations)
+    check_var_condition(
+        none_annotations not in settings.check_run_annotation or len(settings.check_run_annotation) == 1,
+        f"CHECK_RUN_ANNOTATIONS '{none_annotations}' cannot be combined with other annotations: {', '.join(settings.check_run_annotation)}"
+    )
+
+    check_var_condition(settings.test_changes_limit >= 0, f'TEST_CHANGES_LIMIT must be a positive integer or 0: {settings.test_changes_limit}')
+    check_var_condition(settings.api_retries >= 0, f'GITHUB_RETRIES must be a positive integer or 0: {settings.api_retries}')
+    check_var_condition(settings.seconds_between_github_reads > 0, f'SECONDS_BETWEEN_GITHUB_READS must be a positive number: {seconds_between_github_reads}')
+    check_var_condition(settings.seconds_between_github_writes > 0, f'SECONDS_BETWEEN_GITHUB_WRITES must be a positive number: {seconds_between_github_writes}')
+    check_var_condition(settings.secondary_rate_limit_wait_seconds > 0, f'SECONDARY_RATE_LIMIT_WAIT_SECONDS must be a positive number: {secondary_rate_limit_wait_seconds}')
+
+    return settings
+
+
+def set_log_level(handler: logging.Logger, level: str, gha: GithubAction):
+    try:
+        handler.setLevel(level.upper())
+    except ValueError as e:
+        gha.warning(f'Failed to set log level {level}: {e}')
+
+
+if __name__ == "__main__":
+    gha = GithubAction()
+    options = dict(os.environ)
+
+    root_log_level = get_var('ROOT_LOG_LEVEL', options) or 'INFO'
+    set_log_level(logging.root, root_log_level, gha)
+    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)5s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S %z')
+
+    log_level = get_var('LOG_LEVEL', options) or 'INFO'
+    set_log_level(logger, log_level, gha)
+    set_log_level(publish.logger, log_level, gha)
+    if log_level == 'DEBUG':
+        gha.echo(True)
+
+    settings = get_settings(options, gha)
+    logger.debug(f'Settings: {settings}')
+
+    main(settings, gha)
diff --git a/python/requirements-direct.txt b/python/requirements-direct.txt
new file mode 100644
index 0000000..2b17f6c
--- /dev/null
+++ b/python/requirements-direct.txt
@@ -0,0 +1,5 @@
+humanize==3.14.0
+junitparser==3.1.0
+lxml==4.9.3
+psutil==5.9.5
+PyGithub==2.1.1
diff --git a/python/requirements.txt b/python/requirements.txt
new file mode 100644
index 0000000..d44c3db
--- /dev/null
+++ b/python/requirements.txt
@@ -0,0 +1,21 @@
+humanize==3.14.0
+junitparser==3.1.0
+    future==0.18.3
+lxml==4.9.3
+psutil==5.9.5
+PyGithub==2.1.1
+    Deprecated==1.2.14
+        wrapt==1.16.0
+    PyJWT==2.8.0
+    PyNaCl==1.5.0
+        cffi==1.15.1
+            pycparser==2.21
+    python-dateutil==2.8.2
+        six==1.16.0
+    requests==2.31.0
+        certifi==2023.7.22
+        charset-normalizer==3.3.0
+        idna==3.4
+        urllib3==2.0.6
+    typing_extensions==4.7.1
+    urllib3==2.0.6
diff --git a/python/test/constraints.txt b/python/test/constraints.txt
new file mode 100644
index 0000000..f42c5f7
--- /dev/null
+++ b/python/test/constraints.txt
@@ -0,0 +1,2 @@
+# test_github.py fails with newer version
+Werkzeug<2.1.0
\ No newline at end of file
diff --git a/python/test/files/dart/json/README.md b/python/test/files/dart/json/README.md
new file mode 100644
index 0000000..8ad7313
--- /dev/null
+++ b/python/test/files/dart/json/README.md
@@ -0,0 +1,2 @@
+Example test results from https://github.com/dart-code-checker/dart-code-metrics @ 96001b5e78937be84270b6744898c82f9f0d9ddd
+
diff --git a/python/test/files/dart/json/tests.annotations b/python/test/files/dart/json/tests.annotations
new file mode 100644
index 0000000..aeac63b
--- /dev/null
+++ b/python/test/files/dart/json/tests.annotations
@@ -0,0 +1,118 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '2 errors, 1 fail, 1 skipped, 16 pass in 0s',
+ 'summary':
+ '20 tests\u2002\u2003\u200316 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20044 suites\u2003\u2003\u205f\u20041 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20041 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr'
+ 'yKuW1REiy4TYkVDpTHqKrp74w+au3nvY97DNzhXwyfWNYwbBzbAQLA4FBa0ImwJabB+6j'
+ 'PMxknpP8diDrhTK4pNwFmJFVGjz5BBp3LR31UwitIL/MsF/tekvi6wBOliZhf8/QAMgVR'
+ 'H4QAAAA==\n',
+ 'annotations': [
+ {
+ 'path':
+ 'file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/cli_runner_test.dart',
+ 'start_line': 21,
+ 'end_line': 21,
+ 'annotation_level': 'warning',
+ 'message': 'json/tests.json\u2003[took 0s]',
+ 'title': 'Cli runner should have correct invocation failed',
+ 'raw_details':
+ "Expected: 'metrics [arguments] nope'\n "
+ "Actual: 'metrics [arguments] '\n Which: "
+ "is different. Both strings start the same, but the actual value is "
+ "missing the following trailing characters: nope\n\n"
+ "package:test_api expect\n"
+ "test/src/cli/cli_runner_test.dart 22:7 main.."
+ },
+ {
+ 'path':
+ 'file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ 'start_line': 16,
+ 'end_line': 16,
+ 'annotation_level': 'failure',
+ 'message': 'json/tests.json\u2003[took 0s]',
+ 'title': 'detectSdkPath should return `null` if running inside VM with error',
+ 'raw_details':
+ 'Exception: exception\ntest/src/cli/utils/detect_sdk_path_test.dart '
+ '21:7 main..'
+ },
+ {
+ 'path':
+ 'file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ 'start_line': 46,
+ 'end_line': 46,
+ 'annotation_level': 'failure',
+ 'message': 'json/tests.json\u2003[took 0s]',
+ 'title':
+ 'detectSdkPath should return null if sdk path is not found inside '
+ 'environment PATH variable with error',
+ 'raw_details':
+ "Instance of 'Error'\ntest/src/cli/utils/detect_sdk_path_test.dart "
+ "67:9 main.."
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'Cli runner should have correct description'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 20 tests, see "Raw output" for the full list of tests.',
+ 'title': '20 tests found',
+ 'raw_details':
+ 'AnalysisOptions readIterableOfString returns iterables with data '
+ 'or not\nAnalysisOptions readMap returns map with data or not\n'
+ 'AnalysisOptions readMapOfMap returns map with data or not\n'
+ 'AnalysisOptions returns correct "folderPath" on posix platforms\n'
+ 'CheckUnnecessaryNullableCommand should have correct description\n'
+ 'CheckUnnecessaryNullableCommand should have correct invocation\n'
+ 'CheckUnnecessaryNullableCommand should have correct name\n'
+ 'CheckUnnecessaryNullableCommand should have correct usage\nCli '
+ 'runner run with version argument\nCli runner should have correct '
+ 'description\nCli runner should have correct invocation\n'
+ 'analysisOptionsFromFile constructs AnalysisOptions from extends '
+ 'config\nanalysisOptionsFromFile constructs AnalysisOptions from '
+ 'invalid file\nanalysisOptionsFromFile constructs AnalysisOptions '
+ 'from null\nanalysisOptionsFromFile constructs AnalysisOptions from '
+ 'valid file with single import\nanalysisOptionsFromFile constructs '
+ 'AnalysisOptions from yaml file\ndetectSdkPath should find sdk path '
+ 'inside environment PATH variable\ndetectSdkPath should return '
+ '`null` for non-Windows platforms\ndetectSdkPath should return '
+ '`null` if running inside VM\ndetectSdkPath should return null if '
+ 'sdk path is not found inside environment PATH variable'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/dart/json/tests.json b/python/test/files/dart/json/tests.json
new file mode 100644
index 0000000..c68e97a
--- /dev/null
+++ b/python/test/files/dart/json/tests.json
@@ -0,0 +1,73 @@
+{"protocolVersion":"0.1.1","runnerVersion":"1.23.1","pid":1719,"type":"start","time":0}
+{"suite":{"id":0,"platform":"vm","path":"test/src/cli/cli_runner_test.dart"},"type":"suite","time":0}
+{"test":{"id":1,"name":"loading test/src/cli/cli_runner_test.dart","suiteID":0,"groupIDs":[],"metadata":{"skip":false,"skipReason":null},"line":null,"column":null,"url":null},"type":"testStart","time":2}
+{"count":158,"time":16,"type":"allSuites"}
+{"suiteID":0,"observatory":"http://127.0.0.1:39993/KXsTuRgxBB0=/#/inspect?isolateId=isolates%2F1400071086986095&objectId=libraries%2F%4018482860","remoteDebugger":null,"type":"debug","time":9392}
+{"testID":1,"result":"success","skipped":false,"hidden":true,"type":"testDone","time":9409}
+{"group":{"id":2,"suiteID":0,"parentID":null,"name":"","metadata":{"skip":false,"skipReason":null},"testCount":3,"line":null,"column":null,"url":null},"type":"group","time":9416}
+{"group":{"id":3,"suiteID":0,"parentID":2,"name":"Cli runner","metadata":{"skip":false,"skipReason":null},"testCount":3,"line":13,"column":3,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart"},"type":"group","time":9419}
+{"test":{"id":4,"name":"Cli runner should have correct description","suiteID":0,"groupIDs":[2,3],"metadata":{"skip":true,"skipReason":"just skipping"},"line":14,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart"},"type":"testStart","time":9419}
+{"testID":4,"messageType":"skip","message":"Skip: just skipping","type":"print","time":9422}
+{"testID":4,"result":"success","skipped":true,"hidden":false,"type":"testDone","time":9424}
+{"test":{"id":5,"name":"Cli runner should have correct invocation","suiteID":0,"groupIDs":[2,3],"metadata":{"skip":false,"skipReason":null},"line":21,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart"},"type":"testStart","time":9425}
+{"testID":5,"error":"Expected: 'metrics [arguments] nope'\n Actual: 'metrics [arguments] '\n Which: is different. Both strings start the same, but the actual value is missing the following trailing characters: nope\n","stackTrace":"package:test_api expect\ntest/src/cli/cli_runner_test.dart 22:7 main..\n","isFailure":true,"type":"error","time":9501}
+{"testID":5,"result":"failure","skipped":false,"hidden":false,"type":"testDone","time":9502}
+{"group":{"id":6,"suiteID":0,"parentID":3,"name":"Cli runner run","metadata":{"skip":false,"skipReason":null},"testCount":1,"line":28,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart"},"type":"group","time":9503}
+{"test":{"id":7,"name":"Cli runner run with version argument","suiteID":0,"groupIDs":[2,3,6],"metadata":{"skip":false,"skipReason":null},"line":35,"column":7,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart"},"type":"testStart","time":9503}
+{"testID":7,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":9525}
+{"suite":{"id":8,"platform":"vm","path":"test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"suite","time":13466}
+{"test":{"id":9,"name":"loading test/src/cli/commands/check_unnecessary_nullable_command_test.dart","suiteID":8,"groupIDs":[],"metadata":{"skip":false,"skipReason":null},"line":null,"column":null,"url":null},"type":"testStart","time":13466}
+{"suiteID":8,"observatory":"http://127.0.0.1:39993/KXsTuRgxBB0=/#/inspect?isolateId=isolates%2F974706369572371&objectId=libraries%2F%4018385818","remoteDebugger":null,"type":"debug","time":13897}
+{"testID":9,"result":"success","skipped":false,"hidden":true,"type":"testDone","time":13897}
+{"group":{"id":10,"suiteID":8,"parentID":null,"name":"","metadata":{"skip":false,"skipReason":null},"testCount":4,"line":null,"column":null,"url":null},"type":"group","time":13897}
+{"group":{"id":11,"suiteID":8,"parentID":10,"name":"CheckUnnecessaryNullableCommand","metadata":{"skip":false,"skipReason":null},"testCount":4,"line":41,"column":3,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"group","time":13897}
+{"test":{"id":12,"name":"CheckUnnecessaryNullableCommand should have correct name","suiteID":8,"groupIDs":[10,11],"metadata":{"skip":false,"skipReason":null},"line":45,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"testStart","time":13897}
+{"testID":12,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":13920}
+{"test":{"id":13,"name":"CheckUnnecessaryNullableCommand should have correct description","suiteID":8,"groupIDs":[10,11],"metadata":{"skip":false,"skipReason":null},"line":49,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"testStart","time":13920}
+{"testID":13,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":13923}
+{"test":{"id":14,"name":"CheckUnnecessaryNullableCommand should have correct invocation","suiteID":8,"groupIDs":[10,11],"metadata":{"skip":false,"skipReason":null},"line":58,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"testStart","time":13924}
+{"testID":14,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":13935}
+{"test":{"id":15,"name":"CheckUnnecessaryNullableCommand should have correct usage","suiteID":8,"groupIDs":[10,11],"metadata":{"skip":false,"skipReason":null},"line":65,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart"},"type":"testStart","time":13935}
+{"testID":15,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":13942}
+{"suite":{"id":57,"platform":"vm","path":"test/src/cli/utils/detect_sdk_path_test.dart"},"type":"suite","time":39345}
+{"test":{"id":58,"name":"loading test/src/cli/utils/detect_sdk_path_test.dart","suiteID":57,"groupIDs":[],"metadata":{"skip":false,"skipReason":null},"line":null,"column":null,"url":null},"type":"testStart","time":39346}
+{"suiteID":57,"observatory":"http://127.0.0.1:39993/KXsTuRgxBB0=/#/inspect?isolateId=isolates%2F3444428955488735&objectId=libraries%2F%4018185026","remoteDebugger":null,"type":"debug","time":39717}
+{"testID":58,"result":"success","skipped":false,"hidden":true,"type":"testDone","time":39717}
+{"group":{"id":59,"suiteID":57,"parentID":null,"name":"","metadata":{"skip":false,"skipReason":null},"testCount":4,"line":null,"column":null,"url":null},"type":"group","time":39717}
+{"group":{"id":60,"suiteID":57,"parentID":59,"name":"detectSdkPath","metadata":{"skip":false,"skipReason":null},"testCount":4,"line":11,"column":3,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart"},"type":"group","time":39717}
+{"test":{"id":61,"name":"detectSdkPath should return `null` for non-Windows platforms","suiteID":57,"groupIDs":[59,60],"metadata":{"skip":false,"skipReason":null},"line":12,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart"},"type":"testStart","time":39717}
+{"testID":61,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":39740}
+{"test":{"id":62,"name":"detectSdkPath should return `null` if running inside VM","suiteID":57,"groupIDs":[59,60],"metadata":{"skip":false,"skipReason":null},"line":16,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart"},"type":"testStart","time":39740}
+{"testID":62,"error":"Exception: exception","stackTrace":"test/src/cli/utils/detect_sdk_path_test.dart 21:7 main..\n","isFailure":false,"type":"error","time":39748}
+{"testID":62,"result":"error","skipped":false,"hidden":false,"type":"testDone","time":39752}
+{"test":{"id":63,"name":"detectSdkPath should find sdk path inside environment PATH variable","suiteID":57,"groupIDs":[59,60],"metadata":{"skip":false,"skipReason":null},"line":24,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart"},"type":"testStart","time":39752}
+{"testID":63,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":39773}
+{"test":{"id":64,"name":"detectSdkPath should return null if sdk path is not found inside environment PATH variable","suiteID":57,"groupIDs":[59,60],"metadata":{"skip":false,"skipReason":null},"line":46,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart"},"type":"testStart","time":39774}
+{"testID":64,"error":"Instance of 'Error'","stackTrace":"test/src/cli/utils/detect_sdk_path_test.dart 67:9 main..\n","isFailure":false,"type":"error","time":39778}
+{"testID":64,"result":"error","skipped":false,"hidden":false,"type":"testDone","time":39779}
+{"suite":{"id":65,"platform":"vm","path":"test/src/config_builder/models/analysis_options_test.dart"},"type":"suite","time":43677}
+{"test":{"id":66,"name":"loading test/src/config_builder/models/analysis_options_test.dart","suiteID":65,"groupIDs":[],"metadata":{"skip":false,"skipReason":null},"line":null,"column":null,"url":null},"type":"testStart","time":43677}
+{"suiteID":65,"observatory":"http://127.0.0.1:39993/KXsTuRgxBB0=/#/inspect?isolateId=isolates%2F2572154226386227&objectId=libraries%2F%4018279407","remoteDebugger":null,"type":"debug","time":44739}
+{"testID":66,"result":"success","skipped":false,"hidden":true,"type":"testDone","time":44741}
+{"group":{"id":67,"suiteID":65,"parentID":null,"name":"","metadata":{"skip":false,"skipReason":null},"testCount":9,"line":null,"column":null,"url":null},"type":"group","time":44741}
+{"group":{"id":68,"suiteID":65,"parentID":67,"name":"analysisOptionsFromFile constructs AnalysisOptions from","metadata":{"skip":false,"skipReason":null},"testCount":5,"line":39,"column":3,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"group","time":44741}
+{"test":{"id":69,"name":"analysisOptionsFromFile constructs AnalysisOptions from null","suiteID":65,"groupIDs":[67,68],"metadata":{"skip":false,"skipReason":null},"line":40,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44742}
+{"testID":69,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44794}
+{"test":{"id":70,"name":"analysisOptionsFromFile constructs AnalysisOptions from invalid file","suiteID":65,"groupIDs":[67,68],"metadata":{"skip":false,"skipReason":null},"line":46,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44795}
+{"testID":70,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44796}
+{"test":{"id":71,"name":"analysisOptionsFromFile constructs AnalysisOptions from yaml file","suiteID":65,"groupIDs":[67,68],"metadata":{"skip":false,"skipReason":null},"line":55,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44796}
+{"testID":71,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44822}
+{"test":{"id":72,"name":"analysisOptionsFromFile constructs AnalysisOptions from valid file with single import","suiteID":65,"groupIDs":[67,68],"metadata":{"skip":false,"skipReason":null},"line":115,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44823}
+{"testID":72,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44826}
+{"test":{"id":73,"name":"analysisOptionsFromFile constructs AnalysisOptions from extends config","suiteID":65,"groupIDs":[67,68],"metadata":{"skip":false,"skipReason":null},"line":130,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44827}
+{"testID":73,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44846}
+{"group":{"id":74,"suiteID":65,"parentID":67,"name":"AnalysisOptions","metadata":{"skip":false,"skipReason":null},"testCount":4,"line":151,"column":3,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"group","time":44847}
+{"test":{"id":75,"name":"AnalysisOptions readIterableOfString returns iterables with data or not","suiteID":65,"groupIDs":[67,74],"metadata":{"skip":false,"skipReason":null},"line":152,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44847}
+{"testID":75,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44850}
+{"test":{"id":76,"name":"AnalysisOptions readMap returns map with data or not","suiteID":65,"groupIDs":[67,74],"metadata":{"skip":false,"skipReason":null},"line":171,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44850}
+{"testID":76,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44852}
+{"test":{"id":77,"name":"AnalysisOptions readMapOfMap returns map with data or not","suiteID":65,"groupIDs":[67,74],"metadata":{"skip":false,"skipReason":null},"line":191,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44852}
+{"testID":77,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44854}
+{"test":{"id":78,"name":"AnalysisOptions returns correct \"folderPath\" on posix platforms","suiteID":65,"groupIDs":[67,74],"metadata":{"skip":false,"skipReason":null},"line":255,"column":5,"url":"file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart"},"type":"testStart","time":44855}
+{"testID":78,"result":"success","skipped":false,"hidden":false,"type":"testDone","time":44856}
+{"success":false,"type":"done","time":1201464}
diff --git a/python/test/files/dart/json/tests.junit-xml b/python/test/files/dart/json/tests.junit-xml
new file mode 100644
index 0000000..5a38ed6
--- /dev/null
+++ b/python/test/files/dart/json/tests.junit-xml
@@ -0,0 +1,49 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="test/src/cli/cli_runner_test.dart" tests="3" skipped="1" failures="1" errors="0">
+    <testcase name="Cli runner should have correct description" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart" line="14" time="0.005">
+      <skipped message="Skip: just skipping"/>
+    </testcase>
+    <testcase name="Cli runner should have correct invocation" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart" line="21" time="0.077">
+      <failure message="Expected: 'metrics [arguments] nope' Actual: 'metrics [arguments] ' Which: is different. Both strings start the same, but the actual value is missing the following trailing characters: nope"><![CDATA[Expected: 'metrics [arguments] nope'
+ Actual: 'metrics [arguments] '
+ Which: is different. Both strings start the same, but the actual value is missing the following trailing characters: nope
+
+package:test_api expect
+test/src/cli/cli_runner_test.dart 22:7 main..
+]]></failure>
+    </testcase>
+    <testcase name="Cli runner run with version argument" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/cli_runner_test.dart" line="35" time="0.022"/>
+  </testsuite>
+  <testsuite name="test/src/cli/commands/check_unnecessary_nullable_command_test.dart" tests="4" skipped="0" failures="0" errors="0">
+    <testcase name="CheckUnnecessaryNullableCommand should have correct name" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart" line="45" time="0.023"/>
+    <testcase name="CheckUnnecessaryNullableCommand should have correct description" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart" line="49" time="0.003"/>
+    <testcase name="CheckUnnecessaryNullableCommand should have correct invocation" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart" line="58" time="0.011"/>
+    <testcase name="CheckUnnecessaryNullableCommand should have correct usage" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/commands/check_unnecessary_nullable_command_test.dart" line="65" time="0.007"/>
+  </testsuite>
+  <testsuite name="test/src/cli/utils/detect_sdk_path_test.dart" tests="4" skipped="0" failures="0" errors="2">
+    <testcase name="detectSdkPath should return `null` for non-Windows platforms" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart" line="12" time="0.023"/>
+    <testcase name="detectSdkPath should return `null` if running inside VM" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart" line="16" time="0.012">
+      <error message="Exception: exception"><![CDATA[Exception: exception
+test/src/cli/utils/detect_sdk_path_test.dart 21:7 main..
+]]></error>
+    </testcase>
+    <testcase name="detectSdkPath should find sdk path inside environment PATH variable" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart" line="24" time="0.021"/>
+    <testcase name="detectSdkPath should return null if sdk path is not found inside environment PATH variable" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/cli/utils/detect_sdk_path_test.dart" line="46" time="0.005">
+      <error message="Instance of 'Error'"><![CDATA[Instance of 'Error'
+test/src/cli/utils/detect_sdk_path_test.dart 67:9 main..
+]]></error>
+    </testcase>
+  </testsuite>
+  <testsuite name="test/src/config_builder/models/analysis_options_test.dart" tests="9" skipped="0" failures="0" errors="0">
+    <testcase name="analysisOptionsFromFile constructs AnalysisOptions from null" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="40" time="0.052"/>
+    <testcase name="analysisOptionsFromFile constructs AnalysisOptions from invalid file" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="46" time="0.001"/>
+    <testcase name="analysisOptionsFromFile constructs AnalysisOptions from yaml file" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="55" time="0.026"/>
+    <testcase name="analysisOptionsFromFile constructs AnalysisOptions from valid file with single import" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="115" time="0.003"/>
+    <testcase name="analysisOptionsFromFile constructs AnalysisOptions from extends config" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="130" time="0.019"/>
+    <testcase name="AnalysisOptions readIterableOfString returns iterables with data or not" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="152" time="0.003"/>
+    <testcase name="AnalysisOptions readMap returns map with data or not" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="171" time="0.002"/>
+    <testcase name="AnalysisOptions readMapOfMap returns map with data or not" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="191" time="0.002"/>
+    <testcase name="AnalysisOptions returns correct &quot;folderPath&quot; on posix platforms" file="file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/src/config_builder/models/analysis_options_test.dart" line="255" time="0.001"/>
+  </testsuite>
+</testsuites>
diff --git a/python/test/files/dart/json/tests.results b/python/test/files/dart/json/tests.results
new file mode 100644
index 0000000..10e2931
--- /dev/null
+++ b/python/test/files/dart/json/tests.results
@@ -0,0 +1,344 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=4,
+ suite_tests=20,
+ suite_skipped=1,
+ suite_failures=1,
+ suite_errors=2,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='test/src/cli/cli_runner_test.dart',
+ tests=3,
+ skipped=1,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='test/src/cli/commands/check_unnecessary_nullable_command_test.dart',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='test/src/cli/utils/detect_sdk_path_test.dart',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=2,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='test/src/config_builder/models/analysis_options_test.dart',
+ tests=9,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/cli_runner_test.dart',
+ line=14,
+ class_name=None,
+ test_name='Cli runner should have correct description',
+ result='skipped',
+ message='Skip: just skipping',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/cli_runner_test.dart',
+ line=21,
+ class_name=None,
+ test_name='Cli runner should have correct invocation',
+ result='failure',
+ message="Expected: 'metrics [arguments] nope'\n "
+ "Actual: 'metrics [arguments] '\n Which: is "
+ "different. Both strings start the same, but the actual value is "
+ "missing the following trailing characters: nope\n",
+ content="Expected: 'metrics [arguments] nope'\n "
+ "Actual: 'metrics [arguments] '\n Which: is "
+ "different. Both strings start the same, but the actual value is "
+ "missing the following trailing characters: nope\n\npackage:test_api "
+ " expect\ntest/src/cli/cli_runner_test.dart "
+ "22:7 main..\n",
+ stdout=None,
+ stderr=None,
+ time=0.077
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/cli_runner_test.dart',
+ line=35,
+ class_name=None,
+ test_name='Cli runner run with version argument',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.022
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/commands/check_unnecessary_nullable_command_test.dart',
+ line=45,
+ class_name=None,
+ test_name='CheckUnnecessaryNullableCommand should have correct name',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.023
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/commands/check_unnecessary_nullable_command_test.dart',
+ line=49,
+ class_name=None,
+ test_name='CheckUnnecessaryNullableCommand should have correct description',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/commands/check_unnecessary_nullable_command_test.dart',
+ line=58,
+ class_name=None,
+ test_name='CheckUnnecessaryNullableCommand should have correct invocation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/commands/check_unnecessary_nullable_command_test.dart',
+ line=65,
+ class_name=None,
+ test_name='CheckUnnecessaryNullableCommand should have correct usage',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ line=12,
+ class_name=None,
+ test_name='detectSdkPath should return `null` for non-Windows platforms',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.023
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ line=16,
+ class_name=None,
+ test_name='detectSdkPath should return `null` if running inside VM',
+ result='error',
+ message='Exception: exception',
+ content='Exception: exception\ntest/src/cli/utils/detect_sdk_path_test.dart '
+ '21:7 main..\n',
+ stdout=None,
+ stderr=None,
+ time=0.012
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ line=24,
+ class_name=None,
+ test_name='detectSdkPath should find sdk path inside environment PATH variable',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.021
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/cli/utils/detect_sdk_path_test.dart',
+ line=46,
+ class_name=None,
+ test_name='detectSdkPath should return null if sdk path is not found inside '
+ 'environment PATH variable',
+ result='error',
+ message="Instance of 'Error'",
+ content="Instance of 'Error'\ntest/src/cli/utils/detect_sdk_path_test.dart "
+ "67:9 main..\n",
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=40,
+ class_name=None,
+ test_name='analysisOptionsFromFile constructs AnalysisOptions from null',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.052
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=46,
+ class_name=None,
+ test_name='analysisOptionsFromFile constructs AnalysisOptions from invalid file',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=55,
+ class_name=None,
+ test_name='analysisOptionsFromFile constructs AnalysisOptions from yaml file',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.026
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=115,
+ class_name=None,
+ test_name='analysisOptionsFromFile constructs AnalysisOptions from valid file '
+ 'with single import',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=130,
+ class_name=None,
+ test_name='analysisOptionsFromFile constructs AnalysisOptions from extends '
+ 'config',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.019
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=152,
+ class_name=None,
+ test_name='AnalysisOptions readIterableOfString returns iterables with data or '
+ 'not',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=171,
+ class_name=None,
+ test_name='AnalysisOptions readMap returns map with data or not',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=191,
+ class_name=None,
+ test_name='AnalysisOptions readMapOfMap returns map with data or not',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='json/tests.json',
+ test_file='file:///home/runner/work/dart-code-metrics/dart-code-metrics/test/'
+ 'src/config_builder/models/analysis_options_test.dart',
+ line=255,
+ class_name=None,
+ test_name='AnalysisOptions returns correct "folderPath" on posix platforms',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/json/empty.exception b/python/test/files/json/empty.exception
new file mode 100644
index 0000000..97498c4
--- /dev/null
+++ b/python/test/files/json/empty.exception
@@ -0,0 +1 @@
+ParseError: file='files/json/empty.json', message='File is empty.', line=None, column=None, exception=Exception('File is empty.')
\ No newline at end of file
diff --git a/python/test/files/json/empty.json b/python/test/files/json/empty.json
new file mode 100644
index 0000000..e69de29
diff --git a/python/test/files/json/malformed-json.exception b/python/test/files/json/malformed-json.exception
new file mode 100644
index 0000000..f3eb10d
--- /dev/null
+++ b/python/test/files/json/malformed-json.exception
@@ -0,0 +1 @@
+ParseError: file='files/json/malformed-json.json', message='Expecting property name enclosed in double quotes: line 1 column 16 (char 15)', line=None, column=None, exception=JSONDecodeError('Expecting property name enclosed in double quotes: line 1 column 16 (char 15)')
\ No newline at end of file
diff --git a/python/test/files/json/malformed-json.json b/python/test/files/json/malformed-json.json
new file mode 100644
index 0000000..11401bc
--- /dev/null
+++ b/python/test/files/json/malformed-json.json
@@ -0,0 +1 @@
+{"key": "val", malformed: "key"}
diff --git a/python/test/files/json/non-json.exception b/python/test/files/json/non-json.exception
new file mode 100644
index 0000000..05d8c27
--- /dev/null
+++ b/python/test/files/json/non-json.exception
@@ -0,0 +1 @@
+ParseError: file='files/non-json.json', message='Expecting value: line 1 column 1 (char 0)', line=None, column=None, exception=JSONDecodeError('Expecting value: line 1 column 1 (char 0)')
\ No newline at end of file
diff --git a/python/test/files/json/non-json.json b/python/test/files/json/non-json.json
new file mode 100644
index 0000000..ede72e7
--- /dev/null
+++ b/python/test/files/json/non-json.json
@@ -0,0 +1 @@
+this is not json
diff --git a/python/test/files/json/not-existing.exception b/python/test/files/json/not-existing.exception
new file mode 100644
index 0000000..a3092b3
--- /dev/null
+++ b/python/test/files/json/not-existing.exception
@@ -0,0 +1 @@
+ParseError: file='files/json/not-existing.json', message='File does not exist.', line=None, column=None, exception=FileNotFoundError('File does not exist.')
\ No newline at end of file
diff --git a/python/test/files/junit-xml/bazel/suite-logs.annotations b/python/test/files/junit-xml/bazel/suite-logs.annotations
new file mode 100644
index 0000000..f0365bb
--- /dev/null
+++ b/python/test/files/junit-xml/bazel/suite-logs.annotations
@@ -0,0 +1,79 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors in 0s',
+ 'summary':
+ '1 tests\u2002\u2003\u20030 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv'
+ '0KoLbT1M4QgxIsC5g4q498liHp0OzvJnNLBbknOYhqEpAzpgyWjThBDwbFgEelVdSvKxn'
+ 'CpaIOjO5yGvTssYsQWwRyITZ57+K9VZrHKvGWi95AKtCVo1fK6AX55nzvdAAAA\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'bazel/suite-logs.xml\u2003[took 0s]',
+ 'title': 'bazel/failing_absl_test with error',
+ 'raw_details': 'exited with error code 1'
+ },
+ {
+ 'path': 'bazel/failing_absl_test',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'Test suite bazel/failing_absl_test has the following stdout output '
+ '(see Raw output).',
+ 'title': 'Logging on stdout of test suite bazel/failing_absl_test',
+ 'raw_details':
+ 'Generated test.log (if the file is not UTF-8, then this may be '
+ 'unreadable):\nexec ${PAGER:-/usr/bin/less} "$0" || exit 1\n'
+ 'Executing tests from //bazel:failing_absl_test\n'
+ '-------------------------------------------------------------------'
+ '----------\nTraceback (most recent call last):\n File '
+ '"", line 3, in \n import non_existent_package\n'
+ 'ModuleNotFoundError: No module named \'non_existent_package\''
+ },
+ {
+ 'path': 'bazel/failing_absl_test',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message':
+ 'Test suite bazel/failing_absl_test has the following stderr output '
+ '(see Raw output).',
+ 'title': 'Logging on stderr of test suite bazel/failing_absl_test',
+ 'raw_details': 'Generated test.err'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There is 1 test, see "Raw output" for the name of the test.',
+ 'title': '1 test found',
+ 'raw_details': 'bazel/failing_absl_test'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/bazel/suite-logs.junit-xml b/python/test/files/junit-xml/bazel/suite-logs.junit-xml
new file mode 100644
index 0000000..2a3c1e5
--- /dev/null
+++ b/python/test/files/junit-xml/bazel/suite-logs.junit-xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="bazel/failing_absl_test" tests="1" skipped="0" failures="0" errors="1">
+    <testcase name="bazel/failing_absl_test" time="0.0">
+      <error message="exited with error code 1"/>
+    </testcase>
+    <system-out><![CDATA[Generated test.log (if the file is not UTF-8, then this may be unreadable):
+exec ${PAGER:-/usr/bin/less} "$0" || exit 1
+Executing tests from //bazel:failing_absl_test
+-----------------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<reducted>", line 3, in <module>
+    import non_existent_package
+ModuleNotFoundError: No module named 'non_existent_package'
+]]></system-out>
+    <system-err><![CDATA[Generated test.err
+]]></system-err>
+  </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/bazel/suite-logs.results b/python/test/files/junit-xml/bazel/suite-logs.results
new file mode 100644
index 0000000..e9a777d
--- /dev/null
+++ b/python/test/files/junit-xml/bazel/suite-logs.results
@@ -0,0 +1,42 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=1,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='bazel/failing_absl_test',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=1,
+ stdout='Generated test.log (if the file is not UTF-8, then this may be '
+ 'unreadable):\nexec ${PAGER:-/usr/bin/less} "$0" || exit 1\nExecuting '
+ 'tests from //bazel:failing_absl_test\n'
+ '---------------------------------------------------------------------'
+ '--------\nTraceback (most recent call last):\n File "<reducted>", '
+ 'line 3, in <module>\n import non_existent_package\n'
+ 'ModuleNotFoundError: No module named \'non_existent_package\'',
+ stderr='Generated test.err'
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='bazel/suite-logs.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='bazel/failing_absl_test',
+ result='error',
+ message='exited with error code 1',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/bazel/suite-logs.xml b/python/test/files/junit-xml/bazel/suite-logs.xml
new file mode 100644
index 0000000..adcf19b
--- /dev/null
+++ b/python/test/files/junit-xml/bazel/suite-logs.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="bazel/failing_absl_test" tests="1" failures="0" errors="1" skipped="0">
+    <testcase name="bazel/failing_absl_test" time="0">
+      <error message="exited with error code 1"/>
+    </testcase>
+    <system-out><![CDATA[Generated test.log (if the file is not UTF-8, then this may be unreadable):
+exec ${PAGER:-/usr/bin/less} "$0" || exit 1
+Executing tests from //bazel:failing_absl_test
+-----------------------------------------------------------------------------
+Traceback (most recent call last):
+  File "<reducted>", line 3, in <module>
+    import non_existent_package
+ModuleNotFoundError: No module named 'non_existent_package']]>
+    </system-out>
+    <system-err><![CDATA[Generated test.err]]>
+    </system-err>
+  </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/jest/jest-junit.annotations b/python/test/files/junit-xml/jest/jest-junit.annotations
new file mode 100644
index 0000000..c521c6d
--- /dev/null
+++ b/python/test/files/junit-xml/jest/jest-junit.annotations
@@ -0,0 +1,39 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 2 tests pass in 0s',
+ 'summary':
+ '2 tests\u2002\u2003\u20032 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr'
+ '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg'
+ 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 2 tests, see "Raw output" for the full list of tests.',
+ 'title': '2 tests found',
+ 'raw_details': 'Load widget via link\nMount iframe'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/jest/jest-junit.junit-xml b/python/test/files/junit-xml/jest/jest-junit.junit-xml
new file mode 100644
index 0000000..bfb1490
--- /dev/null
+++ b/python/test/files/junit-xml/jest/jest-junit.junit-xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="widget.test.js" tests="2" skipped="0" failures="0" errors="0">
+    <testcase classname="" name="Load widget via link" time="0.272">
+    </testcase>
+    <testcase classname="" name="Mount iframe" time="0.023">
+    </testcase>
+  </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/jest/jest-junit.results b/python/test/files/junit-xml/jest/jest-junit.results
new file mode 100644
index 0000000..058c504
--- /dev/null
+++ b/python/test/files/junit-xml/jest/jest-junit.results
@@ -0,0 +1,49 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=2,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='widget.test.js',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='jest/jest-junit.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Load widget via link',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.272
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='jest/jest-junit.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Mount iframe',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.023
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/jest/jest-junit.xml b/python/test/files/junit-xml/jest/jest-junit.xml
new file mode 100644
index 0000000..63ae5fd
--- /dev/null
+++ b/python/test/files/junit-xml/jest/jest-junit.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites name="jest tests" tests="2" failures="0" errors="0">
+  <testsuite name="widget.test.js" tests="2" errors="0" failures="0" skipped="0">
+    <testcase classname="" name="Load widget via link" time="0.272">
+    </testcase>
+    <testcase classname="" name="Mount iframe" time="0.023">
+    </testcase>
+  </testsuite>
+</testsuites>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/junit.multiresult.annotations b/python/test/files/junit-xml/junit.multiresult.annotations
new file mode 100644
index 0000000..7350421
--- /dev/null
+++ b/python/test/files/junit-xml/junit.multiresult.annotations
@@ -0,0 +1,83 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 1s',
+ 'summary':
+ '1 files\u2004\u20031 suites\u2004\u2003\u20021s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '4 tests\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '1 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '1 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n4 runs\u2006\u2003'
+ '-2 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '3 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '2 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KotfBTeRlCEONGPmYXKuPdlQhEujdvkrn4BkYTX9jQMU4RQoU1ogzgXcZXhKTmsgVFpf'
+ '5S0AFnc2wSTHNoRI/5wehKL82S68d6fLmpcK5V/48pby2EF/JitEt+P6y+BE/eAAAA\n',
+ 'annotations': [
+ {
+ 'path': 'test class',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'junit.multiresult.xml\u2003[took 0s]',
+ 'title': 'test that errors (test class) with error',
+ 'raw_details': 'test teardown failure\nstdout'
+ },
+ {
+ 'path': 'test class',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'junit.multiresult.xml\u2003[took 0s]',
+ 'title': 'test that fails (test class) failed',
+ 'raw_details': 'test failure\nAssertion failed'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'test class ‑ test that is skipped'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 4 tests, see "Raw output" for the full list of tests.',
+ 'title': '4 tests found',
+ 'raw_details':
+ 'test class ‑ test that errors\ntest class ‑ test that fails\ntest '
+ 'class ‑ test that is skipped\ntest class ‑ test that succeeds'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/junit.multiresult.junit-xml b/python/test/files/junit-xml/junit.multiresult.junit-xml
new file mode 100644
index 0000000..7136fe3
--- /dev/null
+++ b/python/test/files/junit-xml/junit.multiresult.junit-xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="test suite" tests="4" skipped="3" failures="2" errors="1" time="1.158">
+    <testcase classname="test class" name="test that errors" time="0.123">
+      <skipped/>
+      <failure message="test failure">Assertion failed</failure>
+      <error message="test teardown failure">stdout</error>
+    </testcase>
+    <testcase classname="test class" name="test that fails" time="0.234">
+      <skipped/>
+      <failure message="test failure">Assertion failed</failure>
+    </testcase>
+    <testcase classname="test class" name="test that is skipped" time="0.345">
+      <skipped/>
+    </testcase>
+    <testcase classname="test class" name="test that succeeds" time="0.456">
+    </testcase>
+  </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/junit.multiresult.results b/python/test/files/junit-xml/junit.multiresult.results
new file mode 100644
index 0000000..8b2acc1
--- /dev/null
+++ b/python/test/files/junit-xml/junit.multiresult.results
@@ -0,0 +1,75 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=4,
+ suite_skipped=3,
+ suite_failures=2,
+ suite_errors=1,
+ suite_time=1,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='test suite',
+ tests=4,
+ skipped=3,
+ failures=2,
+ errors=1,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='junit.multiresult.xml',
+ test_file=None,
+ line=None,
+ class_name='test class',
+ test_name='test that errors',
+ result='error',
+ message='test teardown failure',
+ content='stdout',
+ stdout=None,
+ stderr=None,
+ time=0.123
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='junit.multiresult.xml',
+ test_file=None,
+ line=None,
+ class_name='test class',
+ test_name='test that fails',
+ result='failure',
+ message='test failure',
+ content='Assertion failed',
+ stdout=None,
+ stderr=None,
+ time=0.234
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='junit.multiresult.xml',
+ test_file=None,
+ line=None,
+ class_name='test class',
+ test_name='test that is skipped',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.345
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='junit.multiresult.xml',
+ test_file=None,
+ line=None,
+ class_name='test class',
+ test_name='test that succeeds',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.456
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/junit.multiresult.xml b/python/test/files/junit-xml/junit.multiresult.xml
new file mode 100644
index 0000000..172839e
--- /dev/null
+++ b/python/test/files/junit-xml/junit.multiresult.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='utf-8'?>
+<testsuites>
+  <testsuite name="test suite" tests="4" skipped="3" failures="2" errors="1" time="1.158">
+    <testcase classname="test class" name="test that errors" time="0.123">
+      <skipped/>
+      <failure message="test failure">Assertion failed</failure>
+      <error message="test teardown failure">stdout</error>
+    </testcase>
+    <testcase classname="test class" name="test that fails" time="0.234">
+      <skipped/>
+      <failure message="test failure">Assertion failed</failure>
+    </testcase>
+    <testcase classname="test class" name="test that is skipped" time="0.345">
+      <skipped/>
+    </testcase>
+    <testcase classname="test class" name="test that succeeds" time="0.456">
+    </testcase>
+  </testsuite>
+</testsuites>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/minimal-attributes.annotations b/python/test/files/junit-xml/minimal-attributes.annotations
new file mode 100644
index 0000000..c1920cc
--- /dev/null
+++ b/python/test/files/junit-xml/minimal-attributes.annotations
@@ -0,0 +1,70 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s',
+ 'summary':
+ '4 tests\u2002\u2003\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20031 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry'
+ '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n',
+ 'annotations': [
+ {
+ 'path': 'ClassName',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'minimal-attributes.xml',
+ 'title': 'failed_test (ClassName) failed'
+ },
+ {
+ 'path': 'ClassName',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'minimal-attributes.xml',
+ 'title': 'error_test (ClassName) with error'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'ClassName ‑ skipped_test'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 4 tests, see "Raw output" for the full list of tests.',
+ 'title': '4 tests found',
+ 'raw_details':
+ 'ClassName ‑ error_test\nClassName ‑ failed_test\nClassName ‑ '
+ 'skipped_test\nClassName ‑ test_name'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/minimal-attributes.junit-xml b/python/test/files/junit-xml/minimal-attributes.junit-xml
new file mode 100644
index 0000000..92ca206
--- /dev/null
+++ b/python/test/files/junit-xml/minimal-attributes.junit-xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+    <testsuite tests="4" skipped="1" failures="1" errors="1">
+        <testcase classname="ClassName" name="test_name"/>
+        <testcase classname="ClassName" name="skipped_test">
+            <skipped/>
+        </testcase>
+        <testcase classname="ClassName" name="failed_test">
+            <failure/>
+        </testcase>
+        <testcase classname="ClassName" name="error_test">
+            <error/>
+        </testcase>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/minimal-attributes.results b/python/test/files/junit-xml/minimal-attributes.results
new file mode 100644
index 0000000..83a9370
--- /dev/null
+++ b/python/test/files/junit-xml/minimal-attributes.results
@@ -0,0 +1,75 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=4,
+ suite_skipped=1,
+ suite_failures=1,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name=None,
+ tests=4,
+ skipped=1,
+ failures=1,
+ errors=1,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='minimal-attributes.xml',
+ test_file=None,
+ line=None,
+ class_name='ClassName',
+ test_name='test_name',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='minimal-attributes.xml',
+ test_file=None,
+ line=None,
+ class_name='ClassName',
+ test_name='skipped_test',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='minimal-attributes.xml',
+ test_file=None,
+ line=None,
+ class_name='ClassName',
+ test_name='failed_test',
+ result='failure',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='minimal-attributes.xml',
+ test_file=None,
+ line=None,
+ class_name='ClassName',
+ test_name='error_test',
+ result='error',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/minimal-attributes.xml b/python/test/files/junit-xml/minimal-attributes.xml
new file mode 100644
index 0000000..2d99719
--- /dev/null
+++ b/python/test/files/junit-xml/minimal-attributes.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
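+<!-- fixture: test cases carry only classname and name; no file, line, time or message attributes are set, as reflected in minimal-attributes.results -->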
+<testsuites>
+    <testsuite tests="4" skipped="1" failures="1" errors="1">
+        <testcase classname="ClassName" name="test_name"/>
+        <testcase classname="ClassName" name="skipped_test">
+            <skipped/>
+        </testcase>
+        <testcase classname="ClassName" name="failed_test">
+            <failure/>
+        </testcase>
+        <testcase classname="ClassName" name="error_test">
+            <error/>
+        </testcase>
+    </testsuite>
+</testsuites>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/mocha/latex-utensils.annotations b/python/test/files/junit-xml/mocha/latex-utensils.annotations
new file mode 100644
index 0000000..fe84823
--- /dev/null
+++ b/python/test/files/junit-xml/mocha/latex-utensils.annotations
@@ -0,0 +1,132 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 101 tests pass in 0s',
+ 'summary':
+ '\u205f\u2004\u205f\u20041 files\u2004\u2003\u205f\u2004\u205f\u20041 '
+ 'suites\u2004\u2003\u20020s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '101 tests\u2003101 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n109 runs\u2006\u2003'
+ '109 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MMQ6AIBAEv'
+ '0KoLaDUzxCCEC8imAMq498liIJ2N7O5OagBqwOdCB8IDQniC3NCGcG7jCxjHmKZGH9IhK'
+ 'TUX62w9x/CSLAfoRE9VoPJ3c2xQks204qFu2Dhvqf8tkHMUC8SFknPC30yEpLlAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 101 tests, see "Raw output" for the full list of tests.',
+ 'title': '101 tests found',
+ 'raw_details':
+ 'bibtexParser ‑ parse @comment @article @comment\nbibtexParser ‑ '
+ 'parse a simple bib file\nbibtexParser ‑ parse an entry with only '
+ 'key\nbibtexParser ‑ parse bib with abbreviation\nbibtexParser ‑ '
+ 'parse bib with comments\nbibtexParser ‑ parse entry with command\n'
+ 'bibtexParser ‑ parse entry with concat\nbibtexParser ‑ parse entry '
+ 'with empty citeky\nbibtexParser ‑ parse entry with quotes\n'
+ 'bibtexParser ‑ parse fields ending ,\nbibtexParser ‑ parse only '
+ '@comment\nbibtexParser ‑ should not throw SyntaxError\n'
+ 'bibtexParser ‑ should throw SyntaxError\nlatexLogParser ‑ parse '
+ 'LaTeX log files\nlatexLogParser ‑ parse LaTeX log files generated '
+ 'with -halt-on-error\nlatexParser matchers findAll ‑ test '
+ 'latexParser.findAll\nlatexParser matchers findAllSeqences ‑ test '
+ 'latexParser.findAllSeqences\nlatexParser matchers latexParser '
+ 'findNodeAt ‑ test latexParser.findNodeAt\nlatexParser matchers '
+ 'latexParser findNodeAt ‑ test latexParser.findNodeAt with line and '
+ 'column\nlatexParser matchers latexParser findNodeAt ‑ test '
+ 'latexParser.findNodeAt with line and column for multiple lines\n'
+ 'latexParser matchers pattern ‑ test latexParser.pattern\n'
+ 'latexParser matchers pattern ‑ test latexParser.pattern.match\n'
+ 'latexParser matchers type ‑ test that properties having a '
+ 'Node-related-type value are only content, args, and arg.\n'
+ 'latexParser matchers type ‑ test the types of content, arg, and '
+ 'args.\nlatexParser other ‑ test type guard\nlatexParser other ‑ '
+ 'test type guard with assingment and never type\nlatexParser parse '
+ '2 ‑ parse Sch\\"onbrunner Schlo\\ss{} Stra\\ss e\nlatexParser '
+ 'parse 2 ‑ parse \\"\\i\nlatexParser parse 2 ‑ parse a\\\\b '
+ 'c\\newline\nlatexParser parse 2 ‑ parse space + \\begin{center}\n'
+ 'latexParser parse 2 ‑ parse x {a} { b }d\nlatexParser parse 2 ‑ '
+ 'parse { a }d\nlatexParser parse ‑ parse $ $, including only spaces\n'
+ 'latexParser parse ‑ parse $ a ^ b $\nlatexParser parse ‑ parse $$ '
+ '$$\nlatexParser parse ‑ parse $1$\nlatexParser parse ‑ parse '
+ '$\\left(1\\right]$\nlatexParser parse ‑ parse $\\left.1\\right]$\n'
+ 'latexParser parse ‑ parse $a^b$\nlatexParser parse ‑ parse $a^b$ '
+ 'with {enableMathCharacterLocation: true}\nlatexParser parse ‑ '
+ 'parse \\( \\)\nlatexParser parse ‑ parse \\[ \\]\nlatexParser '
+ 'parse ‑ parse \\begin{align} \\begin{alignedat}\nlatexParser parse '
+ '‑ parse \\begin{align} \\begin{aligned}\nlatexParser parse ‑ parse '
+ '\\begin{align} \\end{align}\nlatexParser parse ‑ parse '
+ '\\begin{align}...\nlatexParser parse ‑ parse \\begin{center} '
+ '\\begin{itemize}\nlatexParser parse ‑ parse \\begin{center}...\n'
+ 'latexParser parse ‑ parse \\begin{center}\\endcommand\nlatexParser '
+ 'parse ‑ parse \\begin{lstlisting}...\nlatexParser parse ‑ parse '
+ '\\begin{minted}...\nlatexParser parse ‑ parse '
+ '\\begin{verbatim*}...\nlatexParser parse ‑ parse '
+ '\\begin{verbatim}...\nlatexParser parse ‑ parse '
+ '\\begin{verbatim}... 02\nlatexParser parse ‑ parse \\def\\abc '
+ '[#1]#2 {#2#1abc}\nlatexParser parse ‑ parse \\def\\abc{abc}\n'
+ 'latexParser parse ‑ parse \\href\nlatexParser parse ‑ parse '
+ '\\label{a_b}\nlatexParser parse ‑ parse \\linebreakMyCommand\n'
+ 'latexParser parse ‑ parse \\newlineMyCommand\nlatexParser parse ‑ '
+ 'parse \\node[label={abc}, efg]\nlatexParser parse ‑ parse '
+ '\\par\\par\nlatexParser parse ‑ parse \\part\nlatexParser parse ‑ '
+ 'parse \\url\nlatexParser parse ‑ parse \\verb*|1|\nlatexParser '
+ 'parse ‑ parse \\verbatimfont{\\small}\nlatexParser parse ‑ parse '
+ '\\verb|1|\nlatexParser parse ‑ parse a command whose name has @\n'
+ 'latexParser parse ‑ parse a^b\nlatexParser parse ‑ parse a_b\n'
+ 'latexParser parse ‑ parse an optional argument having only spaces\n'
+ 'latexParser parse ‑ parse comments\nlatexParser parse ‑ parse '
+ 'empty preamble\nlatexParser parse ‑ parse invalid commands without '
+ 'error\nlatexParser parse ‑ parse newenvironment command\n'
+ 'latexParser parse ‑ parse optional arguments having a tilde\n'
+ 'latexParser parse ‑ parse optional arguments having spaces\n'
+ 'latexParser parse ‑ parse preamble\nlatexParser parse ‑ parse '
+ 'unbalanced \\begin\nlatexParser parse ‑ parse unbalanced '
+ '\\begin{aligned}\nlatexParser parse ‑ parse unbalanced \\end\n'
+ 'latexParser parse ‑ parse unbalanced \\end{aligned}\nlatexParser '
+ 'parse ‑ parse { }, including only spaces\nlatexParser parse ‑ '
+ 'parse ~\nlatexParser parse ‑ should throw SyntaxError\nlatexParser '
+ 'stringify ‑ test latexParser.stringify a b\nlatexParser stringify '
+ '‑ test latexParser.stringify a b\nlatexParser stringify ‑ test '
+ 'latexParser.stringify a\\nb\nlatexParser stringify ‑ test '
+ 'latexParser.stringify a_b\nlatexParser stringify ‑ test '
+ 'latexParser.stringify newcommand 01\nlatexParser stringify ‑ test '
+ 'latexParser.stringify newcommand 02\nlatexParser stringify ‑ test '
+ 'latexParser.stringify newcommand 03\nlatexParser stringify ‑ test '
+ 'latexParser.stringify with lineBreak 01\nlatexParser stringify ‑ '
+ 'test latexParser.stringify with lineBreak 02\nlatexParser '
+ 'stringify ‑ test stringify $ \\sin x$\nlatexParser stringify ‑ '
+ 'test stringify $a^b$\nlatexParser stringify ‑ test stringify '
+ '\\def\\abc [#1]#2 {#2#1abc}\nlatexParser stringify ‑ test '
+ 'stringify \\href[]{}{}\nlatexParser stringify ‑ test stringify '
+ '\\href{}{}\nlatexParser stringify ‑ test stringify \\url\n'
+ 'latexParser stringify ‑ test stringify a_b'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/mocha/latex-utensils.junit-xml b/python/test/files/junit-xml/mocha/latex-utensils.junit-xml
new file mode 100644
index 0000000..93d4973
--- /dev/null
+++ b/python/test/files/junit-xml/mocha/latex-utensils.junit-xml
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuite name="Mocha Tests" tests="109" failures="0" errors="0" skipped="0" time="0">
+    <!-- 109 passing testcase elements of the latex-utensils project, one per run listed in mocha/latex-utensils.results -->
+</testsuite>
diff --git a/python/test/files/junit-xml/mocha/latex-utensils.results b/python/test/files/junit-xml/mocha/latex-utensils.results
new file mode 100644
index 0000000..3cab102
--- /dev/null
+++ b/python/test/files/junit-xml/mocha/latex-utensils.results
@@ -0,0 +1,1441 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=109,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='Mocha Tests',
+ tests=109,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse a simple bib file',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse fields ending ,',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse an entry with only key',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse bib with comments',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse only @comment',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse @comment @article @comment',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse bib with abbreviation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse entry with empty citeky',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse entry with command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse entry with command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse entry with concat',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='parse entry with quotes',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='should not throw SyntaxError',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='bibtexParser',
+ test_name='should throw SyntaxError',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexLogParser',
+ test_name='parse LaTeX log files',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.117
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexLogParser',
+ test_name='parse LaTeX log files generated with -halt-on-error',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.071
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers findAll',
+ test_name='test latexParser.findAll',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers findAllSeqences',
+ test_name='test latexParser.findAllSeqences',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers pattern',
+ test_name='test latexParser.pattern',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers pattern',
+ test_name='test latexParser.pattern.match',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers pattern',
+ test_name='test latexParser.pattern',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers latexParser findNodeAt',
+ test_name='test latexParser.findNodeAt',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers latexParser findNodeAt',
+ test_name='test latexParser.findNodeAt',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers latexParser findNodeAt',
+ test_name='test latexParser.findNodeAt',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers latexParser findNodeAt',
+ test_name='test latexParser.findNodeAt with line and column',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers latexParser findNodeAt',
+ test_name='test latexParser.findNodeAt with line and column for multiple lines',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers type',
+ test_name='test that properties having a Node-related-type value are only '
+ 'content, args, and arg.',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser matchers type',
+ test_name='test the types of content, arg, and args.',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{center}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{center}\\endcommand',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{center} \\begin{itemize}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse unbalanced \\begin',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse unbalanced \\end',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $1$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\url',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\url',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\href',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\href',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\verb|1|',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\verb*|1|',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\verbatimfont{\\small}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{verbatim}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{verbatim}... 02',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{verbatim*}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{minted}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{lstlisting}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse comments',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{align}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{align}...',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse unbalanced \\begin{aligned}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse newenvironment command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse an optional argument having only spaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse optional arguments having spaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse optional arguments having a tilde',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\node[label={abc}, efg]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\def\\abc{abc}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\def\\abc [#1]#2 {#2#1abc}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse a command whose name has @',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse invalid commands without error',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\part',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\par\\par',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\newlineMyCommand',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\linebreakMyCommand',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\label{a_b}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse a_b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse a^b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $a^b$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $a^b$ with {enableMathCharacterLocation: true}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse ~',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='should throw SyntaxError',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $ a ^ b $',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $\\left(1\\right]$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $\\left.1\\right]$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse { }, including only spaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $ $, including only spaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\( \\)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\[ \\]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse $$ $$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{align} \\end{align}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{align} \\begin{aligned}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse \\begin{align} \\begin{alignedat}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse unbalanced \\begin{aligned}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse unbalanced \\end{aligned}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse preamble',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse',
+ test_name='parse empty preamble',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse \\"\\i',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse Sch\\"onbrunner Schlo\\ss{} Stra\\ss e',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse a\\\\b c\\newline',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse space + \\begin{center}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse { a }d',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser parse 2',
+ test_name='parse x {a} { b }d',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify a b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify a b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify a_b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify a\\nb',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify newcommand 01',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify newcommand 02',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify newcommand 03',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.008
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify with lineBreak 01',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test latexParser.stringify with lineBreak 02',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify a_b',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify $a^b$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify $ \\sin x$',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify \\def\\abc [#1]#2 {#2#1abc}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify \\url',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify \\href[]{}{}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser stringify',
+ test_name='test stringify \\href{}{}',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser other',
+ test_name='test type guard',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mocha/latex-utensils.xml',
+ test_file=None,
+ line=None,
+ class_name='latexParser other',
+ test_name='test type guard with assingment and never type',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/mocha/latex-utensils.xml b/python/test/files/junit-xml/mocha/latex-utensils.xml
new file mode 100644
index 0000000..c5d96d7
--- /dev/null
+++ b/python/test/files/junit-xml/mocha/latex-utensils.xml
@@ -0,0 +1,111 @@
+<testsuite name="Mocha Tests" tests="109" failures="0" errors="0" skipped="0" time="0">
+    <!-- 109 passing testcase elements of the latex-utensils project, one per run listed in mocha/latex-utensils.results -->
+</testsuite>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-attributes.annotations b/python/test/files/junit-xml/no-attributes.annotations
new file mode 100644
index 0000000..40ace0f
--- /dev/null
+++ b/python/test/files/junit-xml/no-attributes.annotations
@@ -0,0 +1,31 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 1 skipped, 1 pass in 0s',
+ 'summary':
+ '4 tests\u2002\u2003\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20031 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KoLTSx8jKGIMSNfMwClfHuAoJC92Z2MxeVoISjC5kGQl0A/8EWkHmwJuIYMR58Os11ry'
+ '5wXn6LOODshGSgOiEQLRaDwdRemm3u5b+WuYllblvcag0+QlnE7YzeD8XajRvdAAAA\n',
+ 'annotations': []
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-attributes.junit-xml b/python/test/files/junit-xml/no-attributes.junit-xml
new file mode 100644
index 0000000..a14fca7
--- /dev/null
+++ b/python/test/files/junit-xml/no-attributes.junit-xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+    <testsuite tests="4" skipped="1" failures="1" errors="1">
+        <testcase/>
+        <testcase>
+            <skipped/>
+        </testcase>
+        <testcase>
+            <failure/>
+        </testcase>
+        <testcase>
+            <error/>
+        </testcase>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/no-attributes.results b/python/test/files/junit-xml/no-attributes.results
new file mode 100644
index 0000000..4011aa9
--- /dev/null
+++ b/python/test/files/junit-xml/no-attributes.results
@@ -0,0 +1,22 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=4,
+ suite_skipped=1,
+ suite_failures=1,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name=None,
+ tests=4,
+ skipped=1,
+ failures=1,
+ errors=1,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-attributes.xml b/python/test/files/junit-xml/no-attributes.xml
new file mode 100644
index 0000000..e260ae8
--- /dev/null
+++ b/python/test/files/junit-xml/no-attributes.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
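+<!-- fixture: the suite-level counts come from the testsuite attributes alone; the attribute-less test cases yield no individual cases, as reflected in no-attributes.results -->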
+<testsuites>
+    <testsuite tests="4" skipped="1" failures="1" errors="1">
+        <testcase/>
+        <testcase>
+            <skipped/>
+        </testcase>
+        <testcase>
+            <failure/>
+        </testcase>
+        <testcase>
+            <error/>
+        </testcase>
+    </testsuite>
+</testsuites>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-cases-but-tests.annotations b/python/test/files/junit-xml/no-cases-but-tests.annotations
new file mode 100644
index 0000000..5bb2fd9
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases-but-tests.annotations
@@ -0,0 +1,30 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail, 2 skipped, 3 pass in 0s',
+ 'summary':
+ '6 tests\u2002\u2003\u20033 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20032 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBAFr'
+ '0KoLfwkFl7GEJS4UcEsUBnv7spH6N7MS+bmCo7V8ol1DePWg/th8SgcGE3YEtLhvmvMe7'
+ 'ZeShJDETtcJPpfKAFHqkWxIhpMQfQ6975Z5yKXWuAqFrhuSXOe4AjSYnYT/HkBNCXSZd0'
+ 'AAAA=\n',
+ 'annotations': []
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-cases-but-tests.junit-xml b/python/test/files/junit-xml/no-cases-but-tests.junit-xml
new file mode 100644
index 0000000..c59bf56
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases-but-tests.junit-xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+    <testsuite name="pytest" tests="6" skipped="2" failures="1" errors="0" time="0"/>
+</testsuites>
diff --git a/python/test/files/junit-xml/no-cases-but-tests.results b/python/test/files/junit-xml/no-cases-but-tests.results
new file mode 100644
index 0000000..63bc5d0
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases-but-tests.results
@@ -0,0 +1,22 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=6,
+ suite_skipped=2,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=6,
+ skipped=2,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-cases-but-tests.xml b/python/test/files/junit-xml/no-cases-but-tests.xml
new file mode 100644
index 0000000..a1f85fb
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases-but-tests.xml
@@ -0,0 +1 @@
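+<!-- fixture: the testsuite declares test counts in its attributes but contains no testcase elements -->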
+<testsuite name="pytest" tests="6" skipped="2" failures="1" errors="0" time="0"/>
diff --git a/python/test/files/junit-xml/no-cases.annotations b/python/test/files/junit-xml/no-cases.annotations
new file mode 100644
index 0000000..e49dc25
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases.annotations
@@ -0,0 +1,29 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'No tests found',
+ 'summary':
+ '0 tests\u2002\u2003\u20030 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0K2ttDWyxiCEDfyMbtQGe8uQaNL92ZeMic49JZhVtOggAvmD9ZCOmOKFceK9cgs98LFmF'
+ '7seHTCafSdsESJXkMlspgy9/BfayxijWXLpBAwV3iX4k3DdQOuuvQ/3QAAAA==\n',
+ 'annotations': []
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-cases.junit-xml b/python/test/files/junit-xml/no-cases.junit-xml
new file mode 100644
index 0000000..4e2bc81
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases.junit-xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+    <testsuite name="pytest" tests="0" skipped="0" failures="0" errors="0" time="0"/>
+</testsuites>
diff --git a/python/test/files/junit-xml/no-cases.results b/python/test/files/junit-xml/no-cases.results
new file mode 100644
index 0000000..df6ce55
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases.results
@@ -0,0 +1,22 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=0,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=0,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/no-cases.xml b/python/test/files/junit-xml/no-cases.xml
new file mode 100644
index 0000000..12c8ba9
--- /dev/null
+++ b/python/test/files/junit-xml/no-cases.xml
@@ -0,0 +1 @@
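+<!-- fixture: an empty testsuite, no tests at all -->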
+<testsuite name="pytest" tests="0" skipped="0" failures="0" errors="0" time="0"/>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/non-junit.annotations b/python/test/files/junit-xml/non-junit.annotations
new file mode 100644
index 0000000..6e1249a
--- /dev/null
+++ b/python/test/files/junit-xml/non-junit.annotations
@@ -0,0 +1,39 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 parse errors',
+ 'summary':
+ '0 tests\u2002\u2003\u20030 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '0 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n1 errors\n\n'
+ 'Results for commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBBFf'
+ '0Vm3aK2/UyIKQ35iBldRf+emNC4u+dcODc49JZhVcukgAvmBnOFvZDOmGLHemSWe+NizC'
+ 'hOvAbhNPpBWKJE3VCJLKbMffzXGotYY9kyKQTMFfpSfGh4XnRU87HdAAAA\n',
+ 'annotations': [
+ {
+ 'path': 'non-junit.xml',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'Invalid format.',
+ 'title': 'Error processing result file',
+ 'raw_details': 'non-junit.xml'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/non-junit.exception b/python/test/files/junit-xml/non-junit.exception
new file mode 100644
index 0000000..8ab21ff
--- /dev/null
+++ b/python/test/files/junit-xml/non-junit.exception
@@ -0,0 +1 @@
+JUnitXmlError('Invalid format.')
\ No newline at end of file
diff --git a/python/test/files/junit-xml/non-junit.junit-xml b/python/test/files/junit-xml/non-junit.junit-xml
new file mode 100644
index 0000000..de6d15a
--- /dev/null
+++ b/python/test/files/junit-xml/non-junit.junit-xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- well-formed XML that does not follow the JUnit schema (see non-junit.xml) -->
diff --git a/python/test/files/junit-xml/non-junit.results b/python/test/files/junit-xml/non-junit.results
new file mode 100644
index 0000000..7a64531
--- /dev/null
+++ b/python/test/files/junit-xml/non-junit.results
@@ -0,0 +1,18 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[
+ publish.unittestresults.ParseError(
+ file='non-junit.xml',
+ message='Invalid format.',
+ exception=junitparser.junitparser.JUnitXmlError('Invalid format.')
+ )
+ ],
+ suites=0,
+ suite_tests=0,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[],
+ cases=[]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/non-junit.xml b/python/test/files/junit-xml/non-junit.xml
new file mode 100644
index 0000000..550e86a
--- /dev/null
+++ b/python/test/files/junit-xml/non-junit.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- well-formed XML that is not JUnit; parsing it yields JUnitXmlError('Invalid format.') -->
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/corrupt-xml.exception b/python/test/files/junit-xml/pytest/corrupt-xml.exception
new file mode 100644
index 0000000..56d6493
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/corrupt-xml.exception
@@ -0,0 +1 @@
+ParseError: file='files/junit-xml/pytest/corrupt-xml.xml', message='Premature end of data in tag skipped line 9, line 11, column 22 (corrupt-xml.xml, line 11)', line=None, column=None, exception=XMLSyntaxError('Premature end of data in tag skipped line 9, line 11, column 22')
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/corrupt-xml.xml b/python/test/files/junit-xml/pytest/corrupt-xml.xml
new file mode 100644
index 0000000..2024f2a
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/corrupt-xml.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+    <testsuite name="pytest" tests="5" failures="1" errors="0" skipped="1" time="2">
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" name="test_check_shape_compatibility"/>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" name="test_get_col_info">
+        </testcase>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" name="test_rsh_with_non_zero_exit_code"/>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" name="test_get_available_devices">
+            <skipped message="get_available_devices only supported in Spark 3.0 and above">
+                /horovod/test/test_spark.py:1642: get_available_devices only
+                suppo
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.fail.annotations b/python/test/files/junit-xml/pytest/junit.fail.annotations
new file mode 100644
index 0000000..c19a554
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.fail.annotations
@@ -0,0 +1,74 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail, 1 skipped, 3 pass in 2s',
+ 'summary':
+ '5 tests\u2002\u2003\u20033 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20031 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KoLfzExssYghA3IpgFKuPdXVEUuzez2dm5BqM8H1hTMe4jhBemiCKAs4QtIR3CderzHn'
+ '2UkkT3iQW25/kWWoD5CYXokExNBqPNvWuWuZu/WuIilrhsSbeuEAiexfws+HECiWEEJ90'
+ 'AAAA=\n',
+ 'annotations': [
+ {
+ 'path': 'test/test_spark.py',
+ 'start_line': 819,
+ 'end_line': 819,
+ 'annotation_level': 'warning',
+ 'message': 'pytest/junit.fail.xml\u2003[took 7s]',
+ 'title': 'test_rsh_events (test.test_spark.SparkTests) failed',
+ 'raw_details':
+ 'self = <test_spark.SparkTests testMethod=test_rsh_events>\n\n '
+ ' def test_rsh_events(self):\n > '
+ 'self.do_test_rsh_events(3)\n\n test_spark.py:821:\n '
+ ' _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ '
+ '_ _ _ _ _ _ _ _ _ _ _ _ _ _\n test_spark.py:836: in '
+ 'do_test_rsh_events\n self.do_test_rsh(command, 143, '
+ 'events=events)\n test_spark.py:852: in do_test_rsh\n '
+ ' self.assertEqual(expected_result, res)\n '
+ ' E AssertionError: 143 != 0'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'test.test_spark.SparkTests ‑ test_get_available_devices'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 5 tests, see "Raw output" for the full list of tests.',
+ 'title': '5 tests found',
+ 'raw_details':
+ 'test.test_spark.SparkTests ‑ test_check_shape_compatibility\n'
+ 'test.test_spark.SparkTests ‑ test_get_available_devices\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_events\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.fail.junit-xml b/python/test/files/junit-xml/pytest/junit.fail.junit-xml
new file mode 100644
index 0000000..8d24644
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.fail.junit-xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+    <testsuite errors="0" failures="1" name="pytest" skipped="1" tests="5" time="2">
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" line="1412" name="test_check_shape_compatibility" time="6.435"/>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" line="1641" name="test_get_available_devices" time="0.001">
+            <skipped message="get_available_devices only supported in Spark 3.0 and above" type="pytest.skip">/horovod/test/test_spark.py:1642: get_available_devices only
+                supported in Spark 3.0 and above
+            </skipped>
+        </testcase>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" line="1102" name="test_get_col_info" time="6.417"/>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" line="819" name="test_rsh_events" time="7.541">
+            <failure message="self = &lt;test_spark.SparkTests testMethod=test_rsh_events&gt; def test_rsh_events(self): &gt; self.do_test_rsh_events(3) test_spark.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test_spark.py:836: in do_test_rsh_events self.do_test_rsh(command, 143, events=events) test_spark.py:852: in do_test_rsh self.assertEqual(expected_result, res) E AssertionError: 143 != 0">self = &lt;test_spark.SparkTests testMethod=test_rsh_events&gt;
+
+    def test_rsh_events(self):
+&gt;       self.do_test_rsh_events(3)
+
+test_spark.py:821:
+_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+test_spark.py:836: in do_test_rsh_events
+    self.do_test_rsh(command, 143, events=events)
+test_spark.py:852: in do_test_rsh
+    self.assertEqual(expected_result, res)
+E   AssertionError: 143 != 0
+        </failure>
+        </testcase>
+        <testcase classname="test.test_spark.SparkTests" file="test/test_spark.py" line="813" name="test_rsh_with_non_zero_exit_code" time="1.514"/>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/pytest/junit.fail.results b/python/test/files/junit-xml/pytest/junit.fail.results
new file mode 100644
index 0000000..130637e
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.fail.results
@@ -0,0 +1,103 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=5,
+ suite_skipped=1,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=2,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=5,
+ skipped=1,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.fail.xml',
+ test_file='test/test_spark.py',
+ line=1412,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_check_shape_compatibility',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.435
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.fail.xml',
+ test_file='test/test_spark.py',
+ line=1641,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_available_devices',
+ result='skipped',
+ message='get_available_devices only supported in Spark 3.0 and above',
+ content='/horovod/test/test_spark.py:1642: get_available_devices only\n '
+ ' supported in Spark 3.0 and above\n ',
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.fail.xml',
+ test_file='test/test_spark.py',
+ line=1102,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.417
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.fail.xml',
+ test_file='test/test_spark.py',
+ line=819,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_events',
+ result='failure',
+ message='self = <test_spark.SparkTests testMethod=test_rsh_events> def '
+ 'test_rsh_events(self): > self.do_test_rsh_events(3) '
+ 'test_spark.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ '
+ '_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ test_spark.py:836: in '
+ 'do_test_rsh_events self.do_test_rsh(command, 143, events=events) '
+ 'test_spark.py:852: in do_test_rsh '
+ 'self.assertEqual(expected_result, res) E AssertionError: 143 != 0',
+ content='self = <test_spark.SparkTests testMethod=test_rsh_events>\n\n '
+ ' def test_rsh_events(self):\n > '
+ 'self.do_test_rsh_events(3)\n\n test_spark.py:821:\n '
+ ' _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ '
+ '_ _ _ _ _ _ _ _ _ _ _ _\n test_spark.py:836: in '
+ 'do_test_rsh_events\n self.do_test_rsh(command, 143, '
+ 'events=events)\n test_spark.py:852: in do_test_rsh\n '
+ ' self.assertEqual(expected_result, res)\n '
+ 'E AssertionError: 143 != 0\n ',
+ stdout=None,
+ stderr=None,
+ time=7.541
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.fail.xml',
+ test_file='test/test_spark.py',
+ line=813,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_with_non_zero_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.514
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.fail.xml b/python/test/files/junit-xml/pytest/junit.fail.xml
new file mode 100644
index 0000000..b47e289
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.fail.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuite errors="0" failures="1" name="pytest"
+           skipped="1" tests="5" time="2">
+    <testcase classname="test.test_spark.SparkTests"
+              file="test/test_spark.py" line="1412"
+              name="test_check_shape_compatibility" time="6.435"/>
+    <testcase classname="test.test_spark.SparkTests"
+              file="test/test_spark.py" line="1641"
+              name="test_get_available_devices" time="0.001">
+        <skipped message="get_available_devices only supported in Spark 3.0 and above" type="pytest.skip">/horovod/test/test_spark.py:1642: get_available_devices only
+                supported in Spark 3.0 and above
+            </skipped>
+    </testcase>
+    <testcase classname="test.test_spark.SparkTests"
+              file="test/test_spark.py" line="1102"
+              name="test_get_col_info" time="6.417"/>
+    <testcase classname="test.test_spark.SparkTests"
+              file="test/test_spark.py" line="819"
+              name="test_rsh_events" time="7.541">
+        <failure message="self = &lt;test_spark.SparkTests testMethod=test_rsh_events&gt;
+    def test_rsh_events(self):
+>       self.do_test_rsh_events(3)
+test_spark.py:821:
+_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+test_spark.py:836: in do_test_rsh_events
+    self.do_test_rsh(command, 143, events=events)
+test_spark.py:852: in do_test_rsh
+    self.assertEqual(expected_result, res)
+E   AssertionError: 143 != 0">self = &lt;test_spark.SparkTests testMethod=test_rsh_events&gt;
+
+    def test_rsh_events(self):
+>       self.do_test_rsh_events(3)
+
+test_spark.py:821:
+_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+test_spark.py:836: in do_test_rsh_events
+    self.do_test_rsh(command, 143, events=events)
+test_spark.py:852: in do_test_rsh
+    self.assertEqual(expected_result, res)
+E   AssertionError: 143 != 0
+        </failure>
+    </testcase>
+    <testcase classname="test.test_spark.SparkTests"
+              file="test/test_spark.py" line="813"
+              name="test_rsh_with_non_zero_exit_code" time="1.514"/>
+</testsuite>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations
new file mode 100644
index 0000000..0c3f8c2
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.annotations
@@ -0,0 +1,87 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 10 tests pass, 4 skipped in 1m 12s',
+ 'summary':
+ '14 tests\u2002\u2003\u200310 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '1m 12s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20044 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQqAIBBFr'
+ 'yKuW1QEQZcJMaMhzRh1Fd290SxrN+/94R18Bq0cH1hTMe4C+BemgMKD3Qj7lpgWn7bugd'
+ 'EFKaOpi1lhJ1NeZgGaRPlQiBazwbC9xXj/grcovcSfXOJvTVpjwBPki7lF8PMCyjZFT+I'
+ 'AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 4 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '4 skipped tests found',
+ 'raw_details':
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_all_hosts_blacklisted\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_min_hosts_timeout\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_all_hosts_blacklisted\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_min_hosts_timeout'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 14 tests, see "Raw output" for the full list of tests.',
+ 'title': '14 tests found',
+ 'raw_details':
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_all_hosts_blacklisted\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_all_ranks_failure\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_fault_tolerance_without_scaling\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_min_hosts_timeout\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_reset_limit\n'
+ 'test.integration.test_elastic_tensorflow.ElasticTensorFlowTests ‑ '
+ 'test_single_rank_failure\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_all_hosts_blacklisted\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_all_ranks_failure\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_fault_tolerance_without_scaling\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_min_hosts_timeout\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_reset_limit\n'
+ 'test.integration.test_elastic_torch.ElasticTorchTests ‑ '
+ 'test_single_rank_failure'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.junit-xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.junit-xml
new file mode 100644
index 0000000..d7fc4c6
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.junit-xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+    <testsuite errors="0" failures="0" name="pytest" skipped="4" tests="14" time="72">
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/test_elastic_torch.py" line="29" name="test_all_hosts_blacklisted" time="0.003">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_torch.py:30: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="196" name="test_all_ranks_failure" time="2.874"/>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="170" name="test_fault_tolerance_without_scaling" time="4.28"/>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="115" name="test_hosts_added_and_removed" time="6.847"/>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/test_elastic_torch.py" line="34" name="test_min_hosts_timeout" time="0.003">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_torch.py:35: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="242" name="test_reset_limit" time="6.49"/>
+        <testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="142" name="test_single_rank_failure" time="5.264"/>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/test_elastic_tensorflow.py" line="29" name="test_all_hosts_blacklisted" time="0.003">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_tensorflow.py:30: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="196" name="test_all_ranks_failure" time="4.603"/>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="170" name="test_fault_tolerance_without_scaling" time="8.349"/>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="115" name="test_hosts_added_and_removed" time="12.64"/>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/test_elastic_tensorflow.py" line="34" name="test_min_hosts_timeout" time="0.003">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_tensorflow.py:35: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="242" name="test_reset_limit" time="12.21"/>
+        <testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="142" name="test_single_rank_failure" time="8.455"/>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.results b/python/test/files/junit-xml/pytest/junit.gloo.elastic.results
new file mode 100644
index 0000000..a764c60
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.results
@@ -0,0 +1,209 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=14,
+ suite_skipped=4,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=72,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=14,
+ skipped=4,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/test_elastic_torch.py',
+ line=29,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_all_hosts_blacklisted',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/2030',
+ content='/horovod/test/integration/test_elastic_torch.py:30: This test fails '
+ 'due to https://github.com/horovod/horovod/issues/2030',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=196,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_all_ranks_failure',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=2.874
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=170,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_fault_tolerance_without_scaling',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.28
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=115,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_hosts_added_and_removed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.847
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/test_elastic_torch.py',
+ line=34,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_min_hosts_timeout',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/2030',
+ content='/horovod/test/integration/test_elastic_torch.py:35: This test fails '
+ 'due to https://github.com/horovod/horovod/issues/2030',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=242,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_reset_limit',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.49
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=142,
+ class_name='test.integration.test_elastic_torch.ElasticTorchTests',
+ test_name='test_single_rank_failure',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.264
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/test_elastic_tensorflow.py',
+ line=29,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_all_hosts_blacklisted',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/2030',
+ content='/horovod/test/integration/test_elastic_tensorflow.py:30: This test '
+ 'fails due to https://github.com/horovod/horovod/issues/2030',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=196,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_all_ranks_failure',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.603
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=170,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_fault_tolerance_without_scaling',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.349
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=115,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_hosts_added_and_removed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=12.64
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/test_elastic_tensorflow.py',
+ line=34,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_min_hosts_timeout',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/2030',
+ content='/horovod/test/integration/test_elastic_tensorflow.py:35: This test '
+ 'fails due to https://github.com/horovod/horovod/issues/2030',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=242,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_reset_limit',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=12.21
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.xml',
+ test_file='test/integration/elastic_common.py',
+ line=142,
+ class_name='test.integration.test_elastic_tensorflow.ElasticTensorFlowTests',
+ test_name='test_single_rank_failure',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.455
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations
new file mode 100644
index 0000000..7de6020
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.annotations
@@ -0,0 +1,103 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 20 tests pass, 2 skipped in 10m 27s',
+ 'summary':
+ '22 tests\u2002\u2003\u200320 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '10m 27s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20042 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr'
+ '0KYHZRBEy9jCGpsFDAFJuPdLaigW9/7zTv4DNvkeM+ainEXwGcYA0oP1hC2oiNBk4+jEC'
+ '8MLigVTV3MCns0WcwSNhLlY0K0+BgMJhfj/QveovQSf3KJvzVltQZP8FzMLZKfF82Ojyn'
+ 'jAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details':
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_down_by_discovery\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_hosts_added_and_removed'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 22 tests, see "Raw output" for the full list of tests.',
+ 'title': '22 tests found',
+ 'raw_details':
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_down_by_discovery\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_down_by_exception\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_no_spark_black_list\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_spark_blacklist_no_executor_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_executor_reuse_same_task\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_spark_blacklist_no_node_reuse\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_spark_blacklist_no_node_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_auto_scale_up\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_all_hosts_lost\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_exception_all_ranks\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_exception_single_rank\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_exception_with_min_hosts_timeout\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_no_spark_blacklist\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_spark_blacklist_no_executor_reuse\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_spark_blacklist_no_node_reuse\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_node_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_fault_tolerance_unused_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorfl'
+ 'owTests ‑ test_happy_run'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.junit-xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.junit-xml
new file mode 100644
index 0000000..ead436e
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.junit-xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+    <testsuite errors="0" failures="0" name="pytest" skipped="2" tests="22" time="627">
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/test_elastic_spark_tensorflow.py" line="34" name="test_auto_scale_down_by_discovery" time="0.002">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/1994" type="pytest.skip">/horovod/test/integration/test_elastic_spark_tensorflow.py:35: This test fails due to https://github.com/horovod/horovod/issues/1994</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="681" name="test_auto_scale_down_by_exception" time="22.073"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="710" name="test_auto_scale_no_spark_black_list" time="55.445"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse" time="49.354"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_in_app" time="41.024"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_same_task" time="40.743"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse" time="40.671"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse_in_app" time="40.612"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="626" name="test_auto_scale_up" time="30.312"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="613" name="test_fault_tolerance_all_hosts_lost" time="12.623"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="584" name="test_fault_tolerance_exception_all_ranks" time="15.757"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="558" name="test_fault_tolerance_exception_single_rank" time="26.113"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="598" name="test_fault_tolerance_exception_with_min_hosts_timeout" time="26.65"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/test_elastic_spark_tensorflow.py" line="31" name="test_fault_tolerance_hosts_added_and_removed" time="0.002">
+            <skipped message="This test fails due to https://github.com/horovod/horovod/issues/1994" type="pytest.skip">/horovod/test/integration/test_elastic_spark_tensorflow.py:32: This test fails due to https://github.com/horovod/horovod/issues/1994</skipped>
+        </testcase>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="503" name="test_fault_tolerance_no_spark_blacklist" time="25.879"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse" time="26.382"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app" time="26.381"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task" time="26.389"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse" time="26.403"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse_in_app" time="26.404"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="472" name="test_fault_tolerance_unused_hosts_added_and_removed" time="46.667"/>
+        <testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="394" name="test_happy_run" time="21.082"/>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results
new file mode 100644
index 0000000..ea9dd16
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.results
@@ -0,0 +1,333 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=22,
+ suite_skipped=2,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=627,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=22,
+ skipped=2,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/test_elastic_spark_tensorflow.py',
+ line=34,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_down_by_discovery',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/1994',
+ content='/horovod/test/integration/test_elastic_spark_tensorflow.py:35: This '
+ 'test fails due to https://github.com/horovod/horovod/issues/1994',
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=681,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_down_by_exception',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=22.073
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=710,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_no_spark_black_list',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=55.445
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=49.354
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=41.024
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse_same_task',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=40.743
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_spark_blacklist_no_node_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=40.671
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_spark_blacklist_no_node_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=40.612
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=626,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_auto_scale_up',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=30.312
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=613,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_all_hosts_lost',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=12.623
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=584,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_exception_all_ranks',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=15.757
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=558,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_exception_single_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.113
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=598,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_exception_with_min_hosts_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.65
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/test_elastic_spark_tensorflow.py',
+ line=31,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_hosts_added_and_removed',
+ result='skipped',
+ message='This test fails due to https://github.com/horovod/horovod/issues/1994',
+ content='/horovod/test/integration/test_elastic_spark_tensorflow.py:32: This '
+ 'test fails due to https://github.com/horovod/horovod/issues/1994',
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=503,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_no_spark_blacklist',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.879
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.382
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.381
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.389
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_node_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.403
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_node_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.404
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=472,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_fault_tolerance_unused_hosts_added_and_removed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=46.667
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.tf.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=394,
+ class_name='test.integration.test_elastic_spark_tensorflow.'
+ 'ElasticSparkTensorflowTests',
+ test_name='test_happy_run',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=21.082
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.xml
new file mode 100644
index 0000000..54e7536
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.tf.xml
@@ -0,0 +1 @@
+<?xml version="1.0" encoding="utf-8"?><testsuite errors="0" failures="0" name="pytest" skipped="2" tests="22" time="627"><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/test_elastic_spark_tensorflow.py" line="34" name="test_auto_scale_down_by_discovery" time="0.002"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/1994" type="pytest.skip">/horovod/test/integration/test_elastic_spark_tensorflow.py:35: This test fails due to https://github.com/horovod/horovod/issues/1994</skipped></testcase><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="681" name="test_auto_scale_down_by_exception" time="22.073"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="710" name="test_auto_scale_no_spark_black_list" time="55.445"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse" time="49.354"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_in_app" time="41.024"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_same_task" time="40.743"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse" time="40.671"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse_in_app" time="40.612"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="626" name="test_auto_scale_up" time="30.312"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="613" name="test_fault_tolerance_all_hosts_lost" time="12.623"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="584" name="test_fault_tolerance_exception_all_ranks" time="15.757"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="558" name="test_fault_tolerance_exception_single_rank" time="26.113"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="598" name="test_fault_tolerance_exception_with_min_hosts_timeout" time="26.65"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/test_elastic_spark_tensorflow.py" line="31" name="test_fault_tolerance_hosts_added_and_removed" time="0.002"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/1994" type="pytest.skip">/horovod/test/integration/test_elastic_spark_tensorflow.py:32: This test fails due to https://github.com/horovod/horovod/issues/1994</skipped></testcase><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="503" name="test_fault_tolerance_no_spark_blacklist" time="25.879"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse" time="26.382"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app" time="26.381"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task" time="26.389"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse" time="26.403"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse_in_app" time="26.404"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="472" name="test_fault_tolerance_unused_hosts_added_and_removed" time="46.667"/><testcase classname="test.integration.test_elastic_spark_tensorflow.ElasticSparkTensorflowTests" file="test/integration/elastic_spark_common.py" line="394" name="test_happy_run" time="21.082"/></testsuite>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations
new file mode 100644
index 0000000..d23a4c5
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.annotations
@@ -0,0 +1,84 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 22 tests pass in 11m 10s',
+ 'summary':
+ '22 tests\u2002\u2003\u200322 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '11m 10s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20040 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr'
+ '0KoLdRCEy9jCErc+MEsUBnv7oKIYrczbzMHV7CMhnesKhg3DmyCwaGwoDfCpi1J0GT9WN'
+ 'cP9MZJ+TMz7GTSf68ELJkYETVGg25LRX9nwVu8vcCfXOBvTep1BUsQL2Ymwc8LUe9HxOM'
+ 'AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 22 tests, see "Raw output" for the full list of tests.',
+ 'title': '22 tests found',
+ 'raw_details':
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_down_by_discovery\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_down_by_exception\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_no_spark_black_list\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_executor_reuse\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_executor_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_executor_reuse_same_task\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_node_reuse\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_spark_blacklist_no_node_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_auto_scale_up\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_all_hosts_lost\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_exception_all_ranks\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_exception_single_rank\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_exception_with_min_hosts_timeout\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_no_spark_blacklist\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_executor_reuse\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_node_reuse\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_spark_blacklist_no_node_reuse_in_app\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_fault_tolerance_unused_hosts_added_and_removed\n'
+ 'test.integration.test_elastic_spark_torch.ElasticSparkTorchTests ‑ '
+ 'test_happy_run'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.junit-xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.junit-xml
new file mode 100644
index 0000000..c48929b
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.junit-xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+    <testsuite errors="0" failures="0" name="pytest" skipped="0" tests="22" time="670">
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="653" name="test_auto_scale_down_by_discovery" time="26.583"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="681" name="test_auto_scale_down_by_exception" time="20.709"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="710" name="test_auto_scale_no_spark_black_list" time="58.313"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse" time="39.759"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_in_app" time="39.508"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_same_task" time="39.404"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse" time="40.36"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse_in_app" time="39.424"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="626" name="test_auto_scale_up" time="27.592"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="613" name="test_fault_tolerance_all_hosts_lost" time="11.068"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="584" name="test_fault_tolerance_exception_all_ranks" time="14.72"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="558" name="test_fault_tolerance_exception_single_rank" time="23.053"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="598" name="test_fault_tolerance_exception_with_min_hosts_timeout" time="25.401"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="414" name="test_fault_tolerance_hosts_added_and_removed" time="48.786"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="503" name="test_fault_tolerance_no_spark_blacklist" time="22.948"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse" time="25.312"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app" time="25.432"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task" time="25.427"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse" time="25.592"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse_in_app" time="25.494"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="472" name="test_fault_tolerance_unused_hosts_added_and_removed" time="45.176"/>
+        <testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="394" name="test_happy_run" time="19.518"/>
+    </testsuite>
+</testsuites>
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results
new file mode 100644
index 0000000..ccda77b
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.results
@@ -0,0 +1,309 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=22,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=670,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=22,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=653,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_down_by_discovery',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=26.583
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=681,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_down_by_exception',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=20.709
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=710,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_no_spark_black_list',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=58.313
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=39.759
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=39.508
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_spark_blacklist_no_executor_reuse_same_task',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=39.404
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_spark_blacklist_no_node_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=40.36
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_spark_blacklist_no_node_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=39.424
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=626,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_auto_scale_up',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=27.592
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=613,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_all_hosts_lost',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=11.068
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=584,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_exception_all_ranks',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=14.72
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=558,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_exception_single_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=23.053
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=598,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_exception_with_min_hosts_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.401
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=414,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_hosts_added_and_removed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=48.786
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=503,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_no_spark_blacklist',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=22.948
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.312
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.432
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.427
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_node_reuse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.592
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/mock/mock.py',
+ line=1363,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_spark_blacklist_no_node_reuse_in_app',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=25.494
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=472,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_fault_tolerance_unused_hosts_added_and_removed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=45.176
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.elastic.spark.torch.xml',
+ test_file='test/integration/elastic_spark_common.py',
+ line=394,
+ class_name='test.integration.test_elastic_spark_torch.ElasticSparkTorchTests',
+ test_name='test_happy_run',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=19.518
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.xml
new file mode 100644
index 0000000..8b1b4b3
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.spark.torch.xml
@@ -0,0 +1 @@
+<?xml version="1.0" encoding="utf-8"?><testsuite errors="0" failures="0" name="pytest" skipped="0" tests="22" time="670"><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="653" name="test_auto_scale_down_by_discovery" time="26.583"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="681" name="test_auto_scale_down_by_exception" time="20.709"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="710" name="test_auto_scale_no_spark_black_list" time="58.313"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse" time="39.759"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_in_app" time="39.508"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_executor_reuse_same_task" time="39.404"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse" time="40.36"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_auto_scale_spark_blacklist_no_node_reuse_in_app" time="39.424"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="626" name="test_auto_scale_up" time="27.592"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="613" name="test_fault_tolerance_all_hosts_lost" time="11.068"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="584" name="test_fault_tolerance_exception_all_ranks" time="14.72"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="558" name="test_fault_tolerance_exception_single_rank" time="23.053"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="598" name="test_fault_tolerance_exception_with_min_hosts_timeout" time="25.401"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="414" name="test_fault_tolerance_hosts_added_and_removed" time="48.786"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="503" name="test_fault_tolerance_no_spark_blacklist" time="22.948"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse" time="25.312"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_in_app" time="25.432"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_executor_reuse_same_task" time="25.427"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse" time="25.592"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="../usr/local/lib/python3.6/dist-packages/mock/mock.py" line="1363" name="test_fault_tolerance_spark_blacklist_no_node_reuse_in_app" time="25.494"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="472" name="test_fault_tolerance_unused_hosts_added_and_removed" time="45.176"/><testcase classname="test.integration.test_elastic_spark_torch.ElasticSparkTorchTests" file="test/integration/elastic_spark_common.py" line="394" name="test_happy_run" time="19.518"/></testsuite>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.elastic.xml b/python/test/files/junit-xml/pytest/junit.gloo.elastic.xml
new file mode 100644
index 0000000..ca0c7d3
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.elastic.xml
@@ -0,0 +1 @@
+<?xml version="1.0" encoding="utf-8"?><testsuite errors="0" failures="0" name="pytest" skipped="4" tests="14" time="72"><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/test_elastic_torch.py" line="29" name="test_all_hosts_blacklisted" time="0.003"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_torch.py:30: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped></testcase><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="196" name="test_all_ranks_failure" time="2.874"/><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="170" name="test_fault_tolerance_without_scaling" time="4.28"/><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="115" name="test_hosts_added_and_removed" time="6.847"/><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/test_elastic_torch.py" line="34" name="test_min_hosts_timeout" time="0.003"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_torch.py:35: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped></testcase><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="242" name="test_reset_limit" time="6.49"/><testcase classname="test.integration.test_elastic_torch.ElasticTorchTests" file="test/integration/elastic_common.py" line="142" name="test_single_rank_failure" time="5.264"/><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/test_elastic_tensorflow.py" line="29" name="test_all_hosts_blacklisted" time="0.003"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_tensorflow.py:30: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped></testcase><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="196" name="test_all_ranks_failure" time="4.603"/><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="170" name="test_fault_tolerance_without_scaling" time="8.349"/><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="115" name="test_hosts_added_and_removed" time="12.64"/><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/test_elastic_tensorflow.py" line="34" name="test_min_hosts_timeout" time="0.003"><skipped message="This test fails due to https://github.com/horovod/horovod/issues/2030" type="pytest.skip">/horovod/test/integration/test_elastic_tensorflow.py:35: This test fails due to https://github.com/horovod/horovod/issues/2030</skipped></testcase><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="242" name="test_reset_limit" time="12.21"/><testcase classname="test.integration.test_elastic_tensorflow.ElasticTensorFlowTests" file="test/integration/elastic_common.py" line="142" name="test_single_rank_failure" time="8.455"/></testsuite>
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations
new file mode 100644
index 0000000..6489a01
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations
@@ -0,0 +1,179 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 80 tests pass, 17 skipped in 3m 25s',
+ 'summary':
+ '97 tests\u2002\u2003\u200380 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '3m 25s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u200317 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr'
+ 'yKuW1QQ/VwmxIqGSmPUVXT3xrK03bz3De/gE6yj4R0rMsaNA/vB4FBY0IqwzCsSNFk/tv'
+ 'ULvXFSkmnyaBbYfSD+TAJWMvFlRNQYDDr1Jf39Kz4iCd4i6d2c5qTeNrAE4WJmFvy8ADN'
+ 'K9FzlAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 17 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '17 skipped tests found',
+ 'raw_details':
+ 'test.test_run.RunTests ‑ test_js_run\ntest.test_run.RunTests ‑ '
+ 'test_mpi_run_full\ntest.test_run.RunTests ‑ test_mpi_run_minimal\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_on_large_cluster\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_paths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_os_environ\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_pythonpath\n'
+ 'test.test_spark.SparkTests ‑ test_get_available_devices\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_timeout_with_mpi'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 97 tests, see "Raw output" for the full list of tests.',
+ 'title': '97 tests found',
+ 'raw_details':
+ 'test.test_run.RunTests ‑ test_autotune_args\n'
+ 'test.test_run.RunTests ‑ test_autotuning_with_fixed_param\n'
+ 'test.test_run.RunTests ‑ test_config_file\ntest.test_run.RunTests '
+ '‑ test_config_file_override_args\ntest.test_run.RunTests ‑ '
+ 'test_generate_jsrun_rankfile\ntest.test_run.RunTests ‑ '
+ 'test_get_mpi_implementation\ntest.test_run.RunTests ‑ '
+ 'test_gloo_run_minimal\ntest.test_run.RunTests ‑ '
+ 'test_gloo_run_with_os_environ\ntest.test_run.RunTests ‑ test_hash\n'
+ 'test.test_run.RunTests ‑ test_horovodrun_hostfile\n'
+ 'test.test_run.RunTests ‑ test_host_hash\ntest.test_run.RunTests ‑ '
+ 'test_in_thread_args\ntest.test_run.RunTests ‑ test_js_run\n'
+ 'test.test_run.RunTests ‑ test_library_args\ntest.test_run.RunTests '
+ '‑ test_logging_args\ntest.test_run.RunTests ‑ test_mpi_run_full\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_minimal\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_on_large_cluster\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_paths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_os_environ\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_on_event\ntest.test_run.RunTests ‑ '
+ 'test_params_args\ntest.test_run.RunTests ‑ test_run_controller\n'
+ 'test.test_run.RunTests ‑ test_run_with_jsrun\n'
+ 'test.test_run.RunTests ‑ '
+ 'test_safe_shell_exec_captures_last_line_wo_eol\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_captures_stderr\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_captures_stdout\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_event\n'
+ 'test.test_run.RunTests ‑ '
+ 'test_safe_shell_exec_interrupts_on_parent_shutdown\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_returns_exit_code\n'
+ 'test.test_run.RunTests ‑ test_stall_check_args\n'
+ 'test.test_run.RunTests ‑ test_timeline_args\n'
+ 'test.test_run.RunTests ‑ test_validate_config_args\n'
+ 'test.test_spark.SparkTests ‑ test_check_shape_compatibility\n'
+ 'test.test_spark.SparkTests ‑ test_df_cache\n'
+ 'test.test_spark.SparkTests ‑ test_driver_common_interfaces\n'
+ 'test.test_spark.SparkTests ‑ test_driver_common_interfaces_fails\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_driver_common_interfaces_from_settings\n'
+ 'test.test_spark.SparkTests ‑ test_driver_set_local_rank_to_index\n'
+ 'test.test_spark.SparkTests ‑ test_get_available_devices\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info_error_bad_shape\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info_error_bad_size\n'
+ 'test.test_spark.SparkTests ‑ test_get_metadata\n'
+ 'test.test_spark.SparkTests ‑ test_gloo_exec_fn\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_gloo_exec_fn_provides_driver_with_local_rank\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_elastic\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_hdfs_store_parse_url\n'
+ 'test.test_spark.SparkTests ‑ test_host_hash\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_mpi_exec_fn_provides_driver_with_local_rank\n'
+ 'test.test_spark.SparkTests ‑ test_mpirun_exec_fn\n'
+ 'test.test_spark.SparkTests ‑ test_mpirun_not_found\n'
+ 'test.test_spark.SparkTests ‑ test_prepare_data_compress_sparse\n'
+ 'test.test_spark.SparkTests ‑ test_prepare_data_no_compression\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_event\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_events\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_with_zero_exit_code\n'
+ 'test.test_spark.SparkTests ‑ test_spark_driver_host_discovery\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_defaults_num_proc_to_spark_cores_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_defaults_num_proc_to_spark_cores_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_does_not_default_env_to_os_env_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_does_not_default_env_to_os_env_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_num_proc_precedes_spark_cores_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_num_proc_precedes_spark_cores_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_non_zero_exit_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_non_zero_exit_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_os_environ_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_path_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_task_service_abort_command\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_task_service_abort_no_command\n'
+ 'test.test_spark.SparkTests ‑ test_spark_task_service_env\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_task_service_execute_command\n'
+ 'test.test_spark.SparkTests ‑ test_sync_hdfs_store\n'
+ 'test.test_spark.SparkTests ‑ test_task_fn_run_commands\n'
+ 'test.test_spark.SparkTests ‑ test_task_fn_run_gloo_exec\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_check_for_command_start\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_wait_for_command_start_with_timeout\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_wait_for_command_start_without_timeout\n'
+ 'test.test_spark.SparkTests ‑ test_timeout_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_timeout_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_to_list\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_col_boolean\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_col_integer\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_ratio'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
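The `.annotations` fixtures above are Python literals describing the complete check-run payload expected for one JUnit input. A minimal sketch of how such a fixture could be loaded and checked in a test, using only the stdlib (the path is the one from this diff; the assertions mirror the values above, but this loader itself is illustrative, not code from this repository):

import ast

def load_annotations_fixture(path):
    # The fixture body is a Python list literal; literal_eval also handles
    # the implicit concatenation of adjacent string literals used above.
    with open(path, encoding='utf-8') as f:
        return ast.literal_eval(f.read())

expected = load_annotations_fixture(
    'python/test/files/junit-xml/pytest/junit.gloo.standalone.annotations')
check_run = expected[0]
assert check_run['conclusion'] == 'success'
assert check_run['output']['annotations'][-1]['title'] == '97 tests found'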
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.junit-xml b/python/test/files/junit-xml/pytest/junit.gloo.standalone.junit-xml
new file mode 100644
index 0000000..f3127af
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.junit-xml
@@ -0,0 +1,136 @@
+ /horovod/test/test_spark.py:1638: get_available_devices only supported in Spark 3.0 and above
+ /horovod/test/test_spark.py:330: Open MPI is not available
+ /horovod/test/test_spark.py:384: Open MPI is not available
+ /horovod/test/test_run.py:822: MPI is not available
+ /horovod/test/test_run.py:626: MPI is not available
+ /horovod/test/test_run.py:548: MPI is not available
+ /horovod/test/test_run.py:585: MPI is not available
+ /horovod/test/test_run.py:730: MPI is not available
+ /horovod/test/test_run.py:706: MPI is not available
+ /horovod/test/test_run.py:724: MPI is not available
+ /horovod/test/test_run.py:700: MPI is not available
+ /horovod/test/test_run.py:755: MPI is not available
+ /horovod/test/test_run.py:773: MPI is not available
+ /horovod/test/test_run.py:718: MPI is not available
+ /horovod/test/test_run.py:694: MPI is not available
+ /horovod/test/test_run.py:712: MPI is not available
+ /horovod/test/test_run.py:688: MPI is not available
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.results b/python/test/files/junit-xml/pytest/junit.gloo.standalone.results
new file mode 100644
index 0000000..b6a2ab0
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.results
@@ -0,0 +1,1285 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=97,
+ suite_skipped=17,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=205,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=97,
+ skipped=17,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1408,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_check_shape_compatibility',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.389
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1016,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_df_cache',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.311
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=119,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.508
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=140,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces_fails',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.509
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=127,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces_from_settings',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.507
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=152,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_set_local_rank_to_index',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.51
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1637,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_available_devices',
+ result='skipped',
+ message='get_available_devices only supported in Spark 3.0 and above',
+ content='/horovod/test/test_spark.py:1638: get_available_devices only '
+ 'supported in Spark 3.0 and above',
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1098,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.967
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1147,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info_error_bad_shape',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.228
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1159,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info_error_bad_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.974
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1216,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_metadata',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.566
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=921,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_gloo_exec_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=941,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_gloo_exec_fn_provides_driver_with_local_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.052
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=363,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_elastic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=11.536
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=338,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=10.494
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=329,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_with_mpi',
+ result='skipped',
+ message='Open MPI is not available',
+ content='/horovod/test/test_spark.py:330: Open MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=1.415
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1505,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_hdfs_store_parse_url',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=91,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_host_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=936,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpi_exec_fn_provides_driver_with_local_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.034
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=849,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpirun_exec_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=414,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpirun_not_found',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.391
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1336,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_prepare_data_compress_sparse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.877
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1265,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_prepare_data_no_compression',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.206
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=812,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=2.517
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=815,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_events',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.539
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=809,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_with_non_zero_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.513
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=806,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_with_zero_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.513
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=993,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_driver_host_discovery',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.509
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=492,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_defaults_num_proc_to_spark_cores_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.829
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=485,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_defaults_num_proc_to_spark_cores_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.723
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=512,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_does_not_default_env_to_os_env_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.823
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=505,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_does_not_default_env_to_os_env_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.85
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=459,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_num_proc_precedes_spark_cores_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.875
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=452,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_num_proc_precedes_spark_cores_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.889
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=433,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.972
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=426,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.948
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=548,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_non_zero_exit_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.733
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=539,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_non_zero_exit_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.84
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=529,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_os_environ_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.87
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=472,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_path_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.828
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1614,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_abort_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.514
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1630,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_abort_no_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.713
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1568,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_env',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1608,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_execute_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.711
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1466,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_sync_hdfs_store',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=262,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_fn_run_commands',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.407
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=295,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_fn_run_gloo_exec',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.347
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=205,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_check_for_command_start',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.029
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=183,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_wait_for_command_start_with_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.017
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=175,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_wait_for_command_start_without_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.509
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=392,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_timeout_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.259
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=383,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_timeout_with_mpi',
+ result='skipped',
+ message='Open MPI is not available',
+ content='/horovod/test/test_spark.py:384: Open MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=1.325
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1649,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_to_list',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1201,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_col_boolean',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.255
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1186,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_col_integer',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.333
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1171,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_ratio',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.86
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=74,
+ class_name='test.test_run.RunTests',
+ test_name='test_autotune_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=93,
+ class_name='test.test_run.RunTests',
+ test_name='test_autotuning_with_fixed_param',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=165,
+ class_name='test.test_run.RunTests',
+ test_name='test_config_file',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=207,
+ class_name='test.test_run.RunTests',
+ test_name='test_config_file_override_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=864,
+ class_name='test.test_run.RunTests',
+ test_name='test_generate_jsrun_rankfile',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=427,
+ class_name='test.test_run.RunTests',
+ test_name='test_get_mpi_implementation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=790,
+ class_name='test.test_run.RunTests',
+ test_name='test_gloo_run_minimal',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.194
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=801,
+ class_name='test.test_run.RunTests',
+ test_name='test_gloo_run_with_os_environ',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.2
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=415,
+ class_name='test.test_run.RunTests',
+ test_name='test_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=809,
+ class_name='test.test_run.RunTests',
+ test_name='test_horovodrun_hostfile',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=419,
+ class_name='test.test_run.RunTests',
+ test_name='test_host_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=224,
+ class_name='test.test_run.RunTests',
+ test_name='test_in_thread_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=821,
+ class_name='test.test_run.RunTests',
+ test_name='test_js_run',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:822: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.537
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=139,
+ class_name='test.test_run.RunTests',
+ test_name='test_library_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=154,
+ class_name='test.test_run.RunTests',
+ test_name='test_logging_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=625,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_full',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:626: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.175
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=547,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_minimal',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:548: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.171
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=584,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_on_large_cluster',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:585: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.172
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=729,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_both_paths',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:730: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.171
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=705,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_both_pythonpaths',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:706: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.177
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=723,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_env_path',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:724: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.18
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=699,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_env_pythonpath',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:700: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.181
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=754,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_non_zero_exit',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:755: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.189
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=772,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_os_environ',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:773: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.184
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=717,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_sys_path',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:718: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.182
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=693,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_sys_pythonpath',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:694: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.188
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=711,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_without_path',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:712: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.194
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=687,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_without_pythonpath',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/test_run.py:688: MPI is not available',
+ stdout=None,
+ stderr=None,
+ time=0.192
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=249,
+ class_name='test.test_run.RunTests',
+ test_name='test_on_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.218
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=57,
+ class_name='test.test_run.RunTests',
+ test_name='test_params_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=453,
+ class_name='test.test_run.RunTests',
+ test_name='test_run_controller',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.706
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=896,
+ class_name='test.test_run.RunTests',
+ test_name='test_run_with_jsrun',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=350,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_last_line_wo_eol',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.181
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=347,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_stderr',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.177
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=344,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_stdout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.182
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=357,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_interrupts_on_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.029
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=371,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_interrupts_on_parent_shutdown',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.209
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=354,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_returns_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.209
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=119,
+ class_name='test.test_run.RunTests',
+ test_name='test_stall_check_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=108,
+ class_name='test.test_run.RunTests',
+ test_name='test_timeline_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.standalone.xml',
+ test_file='test/test_run.py',
+ line=217,
+ class_name='test.test_run.RunTests',
+ test_name='test_validate_config_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ )
+ ]
+)
\ No newline at end of file
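Each `.results` fixture above is the repr of the ParsedUnitTestResults derived from the matching JUnit file. The headline numbers it records can be recomputed from any JUnit XML with nothing but the stdlib; the following is an illustrative sketch, not the action's actual parser:

import xml.etree.ElementTree as ET

def summarize_junit(path):
    # Count cases the way the ParsedUnitTestResults fixture does: total
    # testcases, plus how many carry a <skipped>, <failure>, or <error>
    # child element.
    root = ET.parse(path).getroot()
    suites = [root] if root.tag == 'testsuite' else root.findall('testsuite')
    cases = [c for s in suites for c in s.iter('testcase')]
    return {
        'tests': len(cases),
        'skipped': sum(1 for c in cases if c.find('skipped') is not None),
        'failures': sum(1 for c in cases if c.find('failure') is not None),
        'errors': sum(1 for c in cases if c.find('error') is not None),
    }

# For junit.gloo.standalone.xml this should line up with the fixture above:
# {'tests': 97, 'skipped': 17, 'failures': 0, 'errors': 0}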
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.standalone.xml b/python/test/files/junit-xml/pytest/junit.gloo.standalone.xml
new file mode 100644
index 0000000..169af68
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.standalone.xml
@@ -0,0 +1 @@
+/horovod/test/test_spark.py:1638: get_available_devices only supported in Spark 3.0 and above/horovod/test/test_spark.py:330: Open MPI is not available/horovod/test/test_spark.py:384: Open MPI is not available/horovod/test/test_run.py:822: MPI is not available/horovod/test/test_run.py:626: MPI is not available/horovod/test/test_run.py:548: MPI is not available/horovod/test/test_run.py:585: MPI is not available/horovod/test/test_run.py:730: MPI is not available/horovod/test/test_run.py:706: MPI is not available/horovod/test/test_run.py:724: MPI is not available/horovod/test/test_run.py:700: MPI is not available/horovod/test/test_run.py:755: MPI is not available/horovod/test/test_run.py:773: MPI is not available/horovod/test/test_run.py:718: MPI is not available/horovod/test/test_run.py:694: MPI is not available/horovod/test/test_run.py:712: MPI is not available/horovod/test/test_run.py:688: MPI is not available
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.annotations b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations
new file mode 100644
index 0000000..b34fc4c
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.static.annotations
@@ -0,0 +1,123 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 12 tests pass, 12 skipped in 1m 9s',
+ 'summary':
+ '24 tests\u2002\u2003\u200312 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '1m 9s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u200312 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMTQqAIBBGr'
+ 'yKuW1REUJcJsaQhzRh1Fd29sR+z3bz3DW/nCvTkeM+qgnEXwCcYAwoPdiVsO2JafNzq5o'
+ 'XBBSnjd/2ZBba/UQI0mTKJCdHiYzCsKRnvX/EWWfASWe/iPCetMeAJnou5WfDjBP7Rpw/'
+ 'kAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 12 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '12 skipped tests found',
+ 'raw_details':
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_func'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 24 tests, see "Raw output" for the full list of tests.',
+ 'title': '24 tests found',
+ 'raw_details':
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_func'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
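The `summary` strings in these fixtures end with a `[test-results]:data:application/gzip;base64,...` link that embeds a machine-readable digest of the run. Decoding it needs only the stdlib; treating the decompressed payload as JSON is an assumption made here for illustration, not something this diff states:

import base64, gzip, json

def decode_test_results_link(link):
    # link looks like: '[test-results]:data:application/gzip;base64,H4sIA...'
    # b64decode discards the embedded newlines by default.
    payload = link.split('base64,', 1)[1]
    raw = gzip.decompress(base64.b64decode(payload))
    return json.loads(raw)  # assumed JSON; adjust if the format differs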
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.junit-xml b/python/test/files/junit-xml/pytest/junit.gloo.static.junit-xml
new file mode 100644
index 0000000..47f7f43
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.static.junit-xml
@@ -0,0 +1,53 @@
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:149: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
+ /horovod/test/integration/test_static_run.py:136: MPI is not available
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.results b/python/test/files/junit-xml/pytest/junit.gloo.static.results
new file mode 100644
index 0000000..209433c
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.static.results
@@ -0,0 +1,347 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=24,
+ suite_skipped=12,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=69,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=24,
+ skipped=12,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.284
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.885
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.615
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.338
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.68
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.279
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_local_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.422
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_local_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.329
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_mixed_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.32
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_mixed_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.324
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_remote_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.318
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_remote_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:149: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.321
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=2.914
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.739
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.611
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.809
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.597
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.788
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_local_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.377
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_local_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.361
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_mixed_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.373
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_mixed_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.436
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_remote_cmd',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.351
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.gloo.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_remote_func',
+ result='skipped',
+ message='MPI is not available',
+ content='/horovod/test/integration/test_static_run.py:136: MPI is not '
+ 'available',
+ stdout=None,
+ stderr=None,
+ time=1.425
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.gloo.static.xml b/python/test/files/junit-xml/pytest/junit.gloo.static.xml
new file mode 100644
index 0000000..5c574fc
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.gloo.static.xml
@@ -0,0 +1 @@
+/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:149: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available/horovod/test/integration/test_static_run.py:136: MPI is not available
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations
new file mode 100644
index 0000000..d1498f0
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.annotations
@@ -0,0 +1,44 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 3 tests pass in 15s',
+ 'summary':
+ '3 tests\u2002\u2003\u20033 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '15s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0K2ttAYGy9jCGLciGAWqIx3d/EL3Zt5yewwodEeetFUAnzE8MEYSQZ0NmHHzE9IX/vuwU'
+ 'elSrHgxqL+xCTRFEITOXoMRfv20sxzN/+1i7PYxXlLuXXFwPAs4WcJxwk6KM9l3gAAAA='
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 3 tests, see "Raw output" for the full list of tests.',
+ 'title': '3 tests found',
+ 'raw_details':
+ 'test.test_interactiverun.InteractiveRunTests ‑ test_failed_run\n'
+ 'test.test_interactiverun.InteractiveRunTests ‑ test_happy_run\n'
+ 'test.test_interactiverun.InteractiveRunTests ‑ '
+ 'test_happy_run_elastic'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
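The `.annotations` fixtures, like the one above, are Python literals describing the GitHub check run the action is expected to create for a given XML input. Because they are literals, a test can load one with `ast.literal_eval` and sanity-check the payload shape; the key sets below mirror the fields visible in the fixtures and are an illustrative check, not the repository's actual test code:

# Hedged sketch: load an .annotations fixture and verify the check-run payload
# carries the fields seen in the fixtures above. Illustrative only.
import ast
from pathlib import Path

CHECK_RUN_KEYS = {'name', 'head_sha', 'status', 'conclusion', 'output'}
ANNOTATION_KEYS = {'path', 'start_line', 'end_line', 'annotation_level',
                   'message', 'title'}

def load_annotations(path: str) -> list:
    payloads = ast.literal_eval(Path(path).read_text())
    for payload in payloads:
        missing = CHECK_RUN_KEYS - payload.keys()
        assert not missing, f'check run missing keys: {missing}'
        for ann in payload['output'].get('annotations', []):
            assert ANNOTATION_KEYS <= ann.keys(), f'incomplete annotation: {ann}'
    return payloads

# e.g.:
# load_annotations('python/test/files/junit-xml/pytest/junit.mpi.integration.annotations')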
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.junit-xml b/python/test/files/junit-xml/pytest/junit.mpi.integration.junit-xml
new file mode 100644
index 0000000..44ac603
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.junit-xml
@@ -0,0 +1,8 @@
+
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.results b/python/test/files/junit-xml/pytest/junit.mpi.integration.results
new file mode 100644
index 0000000..40b579c
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.results
@@ -0,0 +1,62 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=3,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=15,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.integration.xml',
+ test_file='test/test_interactiverun.py',
+ line=78,
+ class_name='test.test_interactiverun.InteractiveRunTests',
+ test_name='test_failed_run',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.386
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.integration.xml',
+ test_file='test/test_interactiverun.py',
+ line=35,
+ class_name='test.test_interactiverun.InteractiveRunTests',
+ test_name='test_happy_run',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.012
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.integration.xml',
+ test_file='test/test_interactiverun.py',
+ line=63,
+ class_name='test.test_interactiverun.InteractiveRunTests',
+ test_name='test_happy_run_elastic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.898
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.integration.xml b/python/test/files/junit-xml/pytest/junit.mpi.integration.xml
new file mode 100644
index 0000000..29d3389
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.integration.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations
new file mode 100644
index 0000000..e243980
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.annotations
@@ -0,0 +1,163 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 96 tests pass, 1 skipped in 3m 39s',
+ 'summary':
+ '97 tests\u2002\u2003\u200396 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '3m 39s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20041 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KoLdRCo5cxBCFu/GAWqIx3d1FE7N7MS+bgGhZlec+qgnHrwSUYPQoHZiOsq44EXS6cXf'
+ 'vCYL2UwTSfmWGPgUdoAQuJMgmFaDAa9Fsqhv0LPuLr3Zzlbs5r0qwrOIK4mJ0EPy/3HdY'
+ 'E4wAAAA==\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'test.test_spark.SparkTests ‑ test_get_available_devices'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 97 tests, see "Raw output" for the full list of tests.',
+ 'title': '97 tests found',
+ 'raw_details':
+ 'test.test_run.RunTests ‑ test_autotune_args\n'
+ 'test.test_run.RunTests ‑ test_autotuning_with_fixed_param\n'
+ 'test.test_run.RunTests ‑ test_config_file\ntest.test_run.RunTests '
+ '‑ test_config_file_override_args\ntest.test_run.RunTests ‑ '
+ 'test_generate_jsrun_rankfile\ntest.test_run.RunTests ‑ '
+ 'test_get_mpi_implementation\ntest.test_run.RunTests ‑ '
+ 'test_gloo_run_minimal\ntest.test_run.RunTests ‑ '
+ 'test_gloo_run_with_os_environ\ntest.test_run.RunTests ‑ test_hash\n'
+ 'test.test_run.RunTests ‑ test_horovodrun_hostfile\n'
+ 'test.test_run.RunTests ‑ test_host_hash\ntest.test_run.RunTests ‑ '
+ 'test_in_thread_args\ntest.test_run.RunTests ‑ test_js_run\n'
+ 'test.test_run.RunTests ‑ test_library_args\ntest.test_run.RunTests '
+ '‑ test_logging_args\ntest.test_run.RunTests ‑ test_mpi_run_full\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_minimal\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_on_large_cluster\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_paths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_both_pythonpaths\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_env_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_non_zero_exit\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_os_environ\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_with_sys_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_path\n'
+ 'test.test_run.RunTests ‑ test_mpi_run_without_pythonpath\n'
+ 'test.test_run.RunTests ‑ test_on_event\ntest.test_run.RunTests ‑ '
+ 'test_params_args\ntest.test_run.RunTests ‑ test_run_controller\n'
+ 'test.test_run.RunTests ‑ test_run_with_jsrun\n'
+ 'test.test_run.RunTests ‑ '
+ 'test_safe_shell_exec_captures_last_line_wo_eol\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_captures_stderr\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_captures_stdout\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_interrupts_on_event\n'
+ 'test.test_run.RunTests ‑ '
+ 'test_safe_shell_exec_interrupts_on_parent_shutdown\n'
+ 'test.test_run.RunTests ‑ test_safe_shell_exec_returns_exit_code\n'
+ 'test.test_run.RunTests ‑ test_stall_check_args\n'
+ 'test.test_run.RunTests ‑ test_timeline_args\n'
+ 'test.test_run.RunTests ‑ test_validate_config_args\n'
+ 'test.test_spark.SparkTests ‑ test_check_shape_compatibility\n'
+ 'test.test_spark.SparkTests ‑ test_df_cache\n'
+ 'test.test_spark.SparkTests ‑ test_driver_common_interfaces\n'
+ 'test.test_spark.SparkTests ‑ test_driver_common_interfaces_fails\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_driver_common_interfaces_from_settings\n'
+ 'test.test_spark.SparkTests ‑ test_driver_set_local_rank_to_index\n'
+ 'test.test_spark.SparkTests ‑ test_get_available_devices\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info_error_bad_shape\n'
+ 'test.test_spark.SparkTests ‑ test_get_col_info_error_bad_size\n'
+ 'test.test_spark.SparkTests ‑ test_get_metadata\n'
+ 'test.test_spark.SparkTests ‑ test_gloo_exec_fn\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_gloo_exec_fn_provides_driver_with_local_rank\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_elastic\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_happy_run_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_hdfs_store_parse_url\n'
+ 'test.test_spark.SparkTests ‑ test_host_hash\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_mpi_exec_fn_provides_driver_with_local_rank\n'
+ 'test.test_spark.SparkTests ‑ test_mpirun_exec_fn\n'
+ 'test.test_spark.SparkTests ‑ test_mpirun_not_found\n'
+ 'test.test_spark.SparkTests ‑ test_prepare_data_compress_sparse\n'
+ 'test.test_spark.SparkTests ‑ test_prepare_data_no_compression\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_event\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_events\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_with_non_zero_exit_code\n'
+ 'test.test_spark.SparkTests ‑ test_rsh_with_zero_exit_code\n'
+ 'test.test_spark.SparkTests ‑ test_spark_driver_host_discovery\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_defaults_num_proc_to_spark_cores_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_defaults_num_proc_to_spark_cores_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_does_not_default_env_to_os_env_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_does_not_default_env_to_os_env_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_num_proc_precedes_spark_cores_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_num_proc_precedes_spark_cores_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_non_zero_exit_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_non_zero_exit_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_run_with_os_environ_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_run_with_path_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_spark_task_service_abort_command\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_task_service_abort_no_command\n'
+ 'test.test_spark.SparkTests ‑ test_spark_task_service_env\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_spark_task_service_execute_command\n'
+ 'test.test_spark.SparkTests ‑ test_sync_hdfs_store\n'
+ 'test.test_spark.SparkTests ‑ test_task_fn_run_commands\n'
+ 'test.test_spark.SparkTests ‑ test_task_fn_run_gloo_exec\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_check_for_command_start\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_wait_for_command_start_with_timeout\n'
+ 'test.test_spark.SparkTests ‑ '
+ 'test_task_service_wait_for_command_start_without_timeout\n'
+ 'test.test_spark.SparkTests ‑ test_timeout_with_gloo\n'
+ 'test.test_spark.SparkTests ‑ test_timeout_with_mpi\n'
+ 'test.test_spark.SparkTests ‑ test_to_list\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_col_boolean\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_col_integer\n'
+ 'test.test_spark.SparkTests ‑ test_train_val_split_ratio'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.junit-xml b/python/test/files/junit-xml/pytest/junit.mpi.standalone.junit-xml
new file mode 100644
index 0000000..63bfd1d
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.junit-xml
@@ -0,0 +1,104 @@
+
+ /horovod/test/test_spark.py:1638: get_available_devices only supported in Spark 3.0 and above
+
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.results b/python/test/files/junit-xml/pytest/junit.mpi.standalone.results
new file mode 100644
index 0000000..f1f9814
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.results
@@ -0,0 +1,1285 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=97,
+ suite_skipped=1,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=219,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=97,
+ skipped=1,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1408,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_check_shape_compatibility',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.035
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1016,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_df_cache',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.226
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=119,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.508
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=140,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces_fails',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.508
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=127,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_common_interfaces_from_settings',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.507
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=152,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_driver_set_local_rank_to_index',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.51
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1637,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_available_devices',
+ result='skipped',
+ message='get_available_devices only supported in Spark 3.0 and above',
+ content='/horovod/test/test_spark.py:1638: get_available_devices only '
+ 'supported in Spark 3.0 and above',
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1098,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.198
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1147,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info_error_bad_shape',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.138
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1159,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_col_info_error_bad_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.161
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1216,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_get_metadata',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=921,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_gloo_exec_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=941,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_gloo_exec_fn_provides_driver_with_local_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.034
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=363,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_elastic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=11.495
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=338,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=10.036
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=329,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_happy_run_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.208
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1505,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_hdfs_store_parse_url',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=91,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_host_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=936,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpi_exec_fn_provides_driver_with_local_rank',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.033
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=849,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpirun_exec_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=414,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_mpirun_not_found',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.869
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1336,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_prepare_data_compress_sparse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.896
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1265,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_prepare_data_no_compression',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.362
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=812,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=2.517
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=815,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_events',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.534
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=809,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_with_non_zero_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.512
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=806,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_rsh_with_zero_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=2.013
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=993,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_driver_host_discovery',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.508
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=492,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_defaults_num_proc_to_spark_cores_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.972
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=485,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_defaults_num_proc_to_spark_cores_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.868
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=512,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_does_not_default_env_to_os_env_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.865
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=505,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_does_not_default_env_to_os_env_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.955
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=459,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_num_proc_precedes_spark_cores_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.872
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=452,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_num_proc_precedes_spark_cores_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.868
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=433,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.975
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=426,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.946
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=548,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_non_zero_exit_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.868
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=539,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_non_zero_exit_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.962
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=529,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_os_environ_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.863
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=472,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_run_with_path_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.822
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1614,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_abort_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.511
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1630,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_abort_no_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.712
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1568,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_env',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1608,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_spark_task_service_execute_command',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.712
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1466,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_sync_hdfs_store',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=262,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_fn_run_commands',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.289
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=295,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_fn_run_gloo_exec',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.251
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=205,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_check_for_command_start',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.022
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=183,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_wait_for_command_start_with_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.013
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=175,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_task_service_wait_for_command_start_without_timeout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.509
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=392,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_timeout_with_gloo',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.096
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=383,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_timeout_with_mpi',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.264
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1649,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_to_list',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1201,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_col_boolean',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.052
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1186,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_col_integer',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.158
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_spark.py',
+ line=1171,
+ class_name='test.test_spark.SparkTests',
+ test_name='test_train_val_split_ratio',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.948
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=74,
+ class_name='test.test_run.RunTests',
+ test_name='test_autotune_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=93,
+ class_name='test.test_run.RunTests',
+ test_name='test_autotuning_with_fixed_param',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=165,
+ class_name='test.test_run.RunTests',
+ test_name='test_config_file',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.008
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=207,
+ class_name='test.test_run.RunTests',
+ test_name='test_config_file_override_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.008
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=864,
+ class_name='test.test_run.RunTests',
+ test_name='test_generate_jsrun_rankfile',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=427,
+ class_name='test.test_run.RunTests',
+ test_name='test_get_mpi_implementation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=790,
+ class_name='test.test_run.RunTests',
+ test_name='test_gloo_run_minimal',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.187
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=801,
+ class_name='test.test_run.RunTests',
+ test_name='test_gloo_run_with_os_environ',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.188
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=415,
+ class_name='test.test_run.RunTests',
+ test_name='test_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=809,
+ class_name='test.test_run.RunTests',
+ test_name='test_horovodrun_hostfile',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=419,
+ class_name='test.test_run.RunTests',
+ test_name='test_host_hash',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=224,
+ class_name='test.test_run.RunTests',
+ test_name='test_in_thread_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=821,
+ class_name='test.test_run.RunTests',
+ test_name='test_js_run',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.181
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=139,
+ class_name='test.test_run.RunTests',
+ test_name='test_library_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=154,
+ class_name='test.test_run.RunTests',
+ test_name='test_logging_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=625,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_full',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.181
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=547,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_minimal',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.197
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=584,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_on_large_cluster',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.178
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=729,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_both_paths',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.177
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=705,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_both_pythonpaths',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.179
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=723,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_env_path',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.184
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=699,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_env_pythonpath',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.207
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=754,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_non_zero_exit',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.179
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=772,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_os_environ',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.18
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=717,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_sys_path',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.186
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=693,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_with_sys_pythonpath',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.179
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=711,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_without_path',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.179
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=687,
+ class_name='test.test_run.RunTests',
+ test_name='test_mpi_run_without_pythonpath',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.179
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=249,
+ class_name='test.test_run.RunTests',
+ test_name='test_on_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.218
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=57,
+ class_name='test.test_run.RunTests',
+ test_name='test_params_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=453,
+ class_name='test.test_run.RunTests',
+ test_name='test_run_controller',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.715
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=896,
+ class_name='test.test_run.RunTests',
+ test_name='test_run_with_jsrun',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=350,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_last_line_wo_eol',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.174
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=347,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_stderr',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.177
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=344,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_captures_stdout',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.174
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=357,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_interrupts_on_event',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.028
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=371,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_interrupts_on_parent_shutdown',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.208
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=354,
+ class_name='test.test_run.RunTests',
+ test_name='test_safe_shell_exec_returns_exit_code',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.178
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=119,
+ class_name='test.test_run.RunTests',
+ test_name='test_stall_check_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=108,
+ class_name='test.test_run.RunTests',
+ test_name='test_timeline_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.standalone.xml',
+ test_file='test/test_run.py',
+ line=217,
+ class_name='test.test_run.RunTests',
+ test_name='test_validate_config_args',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.standalone.xml b/python/test/files/junit-xml/pytest/junit.mpi.standalone.xml
new file mode 100644
index 0000000..e1b36da
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.standalone.xml
@@ -0,0 +1 @@
+/horovod/test/test_spark.py:1638: get_available_devices only supported in Spark 3.0 and above
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.annotations b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations
new file mode 100644
index 0000000..687830f
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.static.annotations
@@ -0,0 +1,88 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 24 tests pass in 2m 4s',
+ 'summary':
+ '24 tests\u2002\u2003\u200324 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2m 4s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20040 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOwqAMBAFr'
+ 'xK2tlCx8jISYsTFT2STVOLd3ajkY/dmHswJE67aQi+aSoD16CKMnqRDswdsOxZ8uXAmGK'
+ 'xX6mcWPNjUUUwS10JoIkOfIb/HYthF8BWp93CWezivKbNt6Bi+Jews4boBWo1x8eMAAAA'
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 24 tests, see "Raw output" for the full list of tests.',
+ 'title': '24 tests found',
+ 'raw_details':
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_gloo_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_failure_mpi_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_gloo_remote_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_local_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_mixed_func\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_cmd\n'
+ 'test.integration.test_static_run.StaticRunTests ‑ '
+ 'test_run_success_mpi_remote_func'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.junit-xml b/python/test/files/junit-xml/pytest/junit.mpi.static.junit-xml
new file mode 100644
index 0000000..7458c66
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.static.junit-xml
@@ -0,0 +1,29 @@
+
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.results b/python/test/files/junit-xml/pytest/junit.mpi.static.results
new file mode 100644
index 0000000..78a3045
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.static.results
@@ -0,0 +1,335 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=24,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=124,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=24,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.067
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.856
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.578
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.311
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.867
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_gloo_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.054
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.032
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.664
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.398
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.151
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.365
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=148,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_failure_mpi_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.825
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.439
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.429
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.804
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.134
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.886
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_gloo_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.585
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_local_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.761
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_local_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.591
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_mixed_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.203
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_mixed_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.504
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_remote_cmd',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.238
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.mpi.static.xml',
+ test_file='test/integration/test_static_run.py',
+ line=135,
+ class_name='test.integration.test_static_run.StaticRunTests',
+ test_name='test_run_success_mpi_remote_func',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.697
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.mpi.static.xml b/python/test/files/junit-xml/pytest/junit.mpi.static.xml
new file mode 100644
index 0000000..c76171f
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.mpi.static.xml
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations
new file mode 100644
index 0000000..863ba84
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.annotations
@@ -0,0 +1,109 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 33 tests pass, 2 skipped in 2m 45s',
+ 'summary':
+ '35 tests\u2002\u2003\u200333 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2m 45s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20042 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KoLfxECy9jCErcKGAWqIx3d1GzavdmXjK7NLBOQfaiKoQMCSLDmFBF8C5j15KgK+azYR'
+ 'hC0jqb5jULbGRqFkbBSqJkMSF6fAwmx8W8f8FbvL2LP7mLvzXtrYVI8CwRZiWPEwEjqVj'
+ 'jAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details':
+ 'test.test_spark_keras.SparkKerasTests ‑ test_session\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant_fails'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 35 tests, see "Raw output" for the full list of tests.',
+ 'title': '35 tests found',
+ 'raw_details':
+ 'test.test_spark_keras.SparkKerasTests ‑ test_batch_generator_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_calculate_shuffle_buffer_size\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_calculate_shuffle_buffer_size_small_row_size\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_convert_custom_sparse_to_dense_bare_keras_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_custom_sparse_to_dense_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_fit_model\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_fit_model_multiclass\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_keras_direct_parquet_train\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_keras_model_checkpoint_callback\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_model_serialization\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prep_data_tf_keras_fn_with_sparse_col\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prep_data_tf_keras_fn_without_sparse_col\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prepare_data_bare_keras_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_reshape\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_restore_from_checkpoint\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_serialize_param_value\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_session\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_loss_with_sample_weight\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_loss_without_sample_weight\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_shuffle_buffer_size\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_shuffle_buffer_size_small_row_size\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_construct_metric_value_holders_one_metric_for_all_labels\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_fit_model\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_get_metric_avgs\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant_fails\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_metric_class\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_prepare_np_data\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_pytorch_get_optimizer_with_unscaled_lr\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_restore_from_checkpoint\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_torch_direct_parquet_train\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_torch_param_serialize\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_transform_multi_class\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_update_metrics'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.junit-xml b/python/test/files/junit-xml/pytest/junit.spark.integration.1.junit-xml
new file mode 100644
index 0000000..3e67db1
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.junit-xml
@@ -0,0 +1,44 @@
+ /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/test_util.py:2076: Not a test.
+ /horovod/test/test_spark_torch.py:469: elastic horovod does not support shutdown from the spark driver while elastic driver is waiting for hosts to come up
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.results b/python/test/files/junit-xml/pytest/junit.spark.integration.1.results
new file mode 100644
index 0000000..bb254f4
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.results
@@ -0,0 +1,483 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=35,
+ suite_skipped=2,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=165,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=35,
+ skipped=2,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=454,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_batch_generator_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=385,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_calculate_shuffle_buffer_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=371,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_calculate_shuffle_buffer_size_small_row_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=410,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_convert_custom_sparse_to_dense_bare_keras_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=399,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_custom_sparse_to_dense_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.041
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=75,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_fit_model',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=14.352
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=103,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_fit_model_multiclass',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=34.284
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=186,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_keras_direct_parquet_train',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=11.54
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=225,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_keras_model_checkpoint_callback',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=14.137
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=322,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_model_serialization',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.851
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=575,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prep_data_tf_keras_fn_with_sparse_col',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.051
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=612,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prep_data_tf_keras_fn_without_sparse_col',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.032
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=416,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prepare_data_bare_keras_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=528,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_reshape',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.039
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=139,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_restore_from_checkpoint',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_keras.py',
+ line=361,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_serialize_param_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/tensorflow_core/python/'
+ 'framework/test_util.py',
+ line=2075,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_session',
+ result='skipped',
+ message='Not a test.',
+ content='/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framewo'
+ 'rk/test_util.py:2076: Not a test.',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=372,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_loss_with_sample_weight',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.022
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=401,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_loss_without_sample_weight',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=198,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_shuffle_buffer_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=184,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_shuffle_buffer_size_small_row_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=227,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_construct_metric_value_holders_one_metric_for_all_labels',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=73,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_fit_model',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=12.786
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=269,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_get_metric_avgs',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=433,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.165
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=447,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic_fault_tolerant',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=23.934
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=468,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic_fault_tolerant_fails',
+ result='skipped',
+ message='elastic horovod does not support shutdown from the spark driver '
+ 'while elastic driver is waiting for hosts to come up',
+ content='/horovod/test/test_spark_torch.py:469: elastic horovod does not '
+ 'support shutdown from the spark driver while elastic driver is '
+ 'waiting for hosts to come up',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=213,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_metric_class',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.026
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=251,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_prepare_np_data',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.319
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=167,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_pytorch_get_optimizer_with_unscaled_lr',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=103,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_restore_from_checkpoint',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.198
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=335,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_torch_direct_parquet_train',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.305
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=325,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_torch_param_serialize',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=140,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_transform_multi_class',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.549
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.1.xml',
+ test_file='test/test_spark_torch.py',
+ line=291,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_update_metrics',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.008
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.1.xml b/python/test/files/junit-xml/pytest/junit.spark.integration.1.xml
new file mode 100644
index 0000000..73e6501
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.1.xml
@@ -0,0 +1 @@
+/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/test_util.py:2076: Not a test./horovod/test/test_spark_torch.py:469: elastic horovod does not support shutdown from the spark driver while elastic driver is waiting for hosts to come up
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations
new file mode 100644
index 0000000..cc90146
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.annotations
@@ -0,0 +1,109 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 33 tests pass, 2 skipped in 2m 52s',
+ 'summary':
+ '35 tests\u2002\u2003\u200333 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2m 52s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u2003\u205f\u20042 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMSwqAMAwFr'
+ '1K6duEHEbxMKVUx+Kmk7Uq8u6mfVHeZeWF2OcDcO9mKIhPSBfAMXUDtwa4Rm5IETT6OVf'
+ '2CcsGYaKpkJtjI8L8aNMwkchY9osXHYFi5GO9f8Bapd/End/G3ZuyygCd4LuFGLY8TfGY'
+ 'a1uMAAAA=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details':
+ 'test.test_spark_keras.SparkKerasTests ‑ test_session\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant_fails'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 35 tests, see "Raw output" for the full list of tests.',
+ 'title': '35 tests found',
+ 'raw_details':
+ 'test.test_spark_keras.SparkKerasTests ‑ test_batch_generator_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_calculate_shuffle_buffer_size\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_calculate_shuffle_buffer_size_small_row_size\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_convert_custom_sparse_to_dense_bare_keras_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_custom_sparse_to_dense_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_fit_model\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_fit_model_multiclass\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_keras_direct_parquet_train\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_keras_model_checkpoint_callback\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_model_serialization\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prep_data_tf_keras_fn_with_sparse_col\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prep_data_tf_keras_fn_without_sparse_col\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_prepare_data_bare_keras_fn\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_reshape\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ '
+ 'test_restore_from_checkpoint\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_serialize_param_value\n'
+ 'test.test_spark_keras.SparkKerasTests ‑ test_session\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_loss_with_sample_weight\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_loss_without_sample_weight\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_shuffle_buffer_size\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_calculate_shuffle_buffer_size_small_row_size\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_construct_metric_value_holders_one_metric_for_all_labels\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_fit_model\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_get_metric_avgs\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_happy_run_elastic\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_happy_run_elastic_fault_tolerant_fails\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_metric_class\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_prepare_np_data\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_pytorch_get_optimizer_with_unscaled_lr\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_restore_from_checkpoint\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ '
+ 'test_torch_direct_parquet_train\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_torch_param_serialize\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_transform_multi_class\n'
+ 'test.test_spark_torch.SparkTorchTests ‑ test_update_metrics'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.junit-xml b/python/test/files/junit-xml/pytest/junit.spark.integration.2.junit-xml
new file mode 100644
index 0000000..182df17
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.junit-xml
@@ -0,0 +1,44 @@
+ /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/test_util.py:2076: Not a test.
+ /horovod/test/test_spark_torch.py:469: elastic horovod does not support shutdown from the spark driver while elastic driver is waiting for hosts to come up
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.results b/python/test/files/junit-xml/pytest/junit.spark.integration.2.results
new file mode 100644
index 0000000..fc82e8c
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.results
@@ -0,0 +1,483 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=35,
+ suite_skipped=2,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=172,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=35,
+ skipped=2,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=454,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_batch_generator_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=385,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_calculate_shuffle_buffer_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=371,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_calculate_shuffle_buffer_size_small_row_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=410,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_convert_custom_sparse_to_dense_bare_keras_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=399,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_custom_sparse_to_dense_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.038
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=75,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_fit_model',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=12.424
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=103,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_fit_model_multiclass',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=31.925
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=186,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_keras_direct_parquet_train',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=11.57
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=225,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_keras_model_checkpoint_callback',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=14.517
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=322,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_model_serialization',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.223
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=575,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prep_data_tf_keras_fn_with_sparse_col',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.051
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=612,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prep_data_tf_keras_fn_without_sparse_col',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.034
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=416,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_prepare_data_bare_keras_fn',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=528,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_reshape',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.04
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=139,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_restore_from_checkpoint',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.92
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_keras.py',
+ line=361,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_serialize_param_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='../usr/local/lib/python3.6/dist-packages/tensorflow_core/python/'
+ 'framework/test_util.py',
+ line=2075,
+ class_name='test.test_spark_keras.SparkKerasTests',
+ test_name='test_session',
+ result='skipped',
+ message='Not a test.',
+ content='/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framewo'
+ 'rk/test_util.py:2076: Not a test.',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=372,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_loss_with_sample_weight',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.025
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=401,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_loss_without_sample_weight',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=198,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_shuffle_buffer_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=184,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_calculate_shuffle_buffer_size_small_row_size',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=227,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_construct_metric_value_holders_one_metric_for_all_labels',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=73,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_fit_model',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=13.773
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=269,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_get_metric_avgs',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=433,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.573
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=447,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic_fault_tolerant',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=27.56
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=468,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_happy_run_elastic_fault_tolerant_fails',
+ result='skipped',
+ message='elastic horovod does not support shutdown from the spark driver '
+ 'while elastic driver is waiting for hosts to come up',
+ content='/horovod/test/test_spark_torch.py:469: elastic horovod does not '
+ 'support shutdown from the spark driver while elastic driver is '
+ 'waiting for hosts to come up',
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=213,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_metric_class',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.023
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=251,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_prepare_np_data',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.061
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=167,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_pytorch_get_optimizer_with_unscaled_lr',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=103,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_restore_from_checkpoint',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.464
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=335,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_torch_direct_parquet_train',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=9.825
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=325,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_torch_param_serialize',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=140,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_transform_multi_class',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=7.384
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='pytest/junit.spark.integration.2.xml',
+ test_file='test/test_spark_torch.py',
+ line=291,
+ class_name='test.test_spark_torch.SparkTorchTests',
+ test_name='test_update_metrics',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.008
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/pytest/junit.spark.integration.2.xml b/python/test/files/junit-xml/pytest/junit.spark.integration.2.xml
new file mode 100644
index 0000000..1464a93
--- /dev/null
+++ b/python/test/files/junit-xml/pytest/junit.spark.integration.2.xml
@@ -0,0 +1 @@
+/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/test_util.py:2076: Not a test./horovod/test/test_spark_torch.py:469: elastic horovod does not support shutdown from the spark driver while elastic driver is waiting for hosts to come up
\ No newline at end of file
diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations
new file mode 100644
index 0000000..59ed37a
--- /dev/null
+++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.annotations
@@ -0,0 +1,49 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 5 tests pass in 2s',
+ 'summary':
+ '5 tests\u2002\u2003\u20035 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf'
+ '0U8d6igSz8TYkZLlrHqKfr3VlOz25t5MBdfQCvLR9Y1jFsPrsDsUTgwB2FPSIcL15D3ZL'
+ '2Uf7HBSaItYhGgf0IhGkwG/ZF7Yda5l79a5CoWuW5Js+/gCNJidhX8fgDdy7133QAAAA='
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 5 tests, see "Raw output" for the full list of tests.',
+ 'title': '5 tests found',
+ 'raw_details':
+ 'uk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options diff '
+ 'value\nuk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options '
+ 'left and right prefixes\n'
+ 'uk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options with '
+ 'change column name same as diff column\n'
+ 'uk.co.gresearch.spark.diff.DiffOptionsSuite ‑ diff options with '
+ 'empty diff column name\n'
+ 'uk.co.gresearch.spark.diff.DiffOptionsSuite ‑ fluent methods of '
+ 'diff options'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.junit-xml b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.junit-xml
new file mode 100644
index 0000000..0d757f8
--- /dev/null
+++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.junit-xml
@@ -0,0 +1,84 @@
diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results
new file mode 100644
index 0000000..f03c190
--- /dev/null
+++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.results
@@ -0,0 +1,88 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=5,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=2,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml',
+ test_file=None,
+ line=None,
+ class_name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ test_name='diff options with empty diff column name',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.259
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml',
+ test_file=None,
+ line=None,
+ class_name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ test_name='diff options left and right prefixes',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.959
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml',
+ test_file=None,
+ line=None,
+ class_name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ test_name='diff options diff value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml',
+ test_file=None,
+ line=None,
+ class_name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ test_name='diff options with change column name same as diff column',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml',
+ test_file=None,
+ line=None,
+ class_name='uk.co.gresearch.spark.diff.DiffOptionsSuite',
+ test_name='fluent methods of diff options',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml
new file mode 100644
index 0000000..67162d7
--- /dev/null
+++ b/python/test/files/junit-xml/scalatest/TEST-uk.co.gresearch.spark.diff.DiffOptionsSuite.xml
@@ -0,0 +1,105 @@
diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.annotations b/python/test/files/junit-xml/testsuite-in-testsuite.annotations
new file mode 100644
index 0000000..7c5453e
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-in-testsuite.annotations
@@ -0,0 +1,42 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 5 tests pass in 4s',
+ 'summary':
+ '5 tests\u2002\u2003\u20035 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '4s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '4 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf'
+ '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx'
+ 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA='
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 5 tests, see "Raw output" for the full list of tests.',
+ 'title': '5 tests found',
+ 'raw_details':
+ 'someName ‑ TestCase1\nsomeName ‑ TestCase2\nsomeName ‑ TestCase3\n'
+ 'someName ‑ TestCase4\nsomeName ‑ TestCase5'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.junit-xml b/python/test/files/junit-xml/testsuite-in-testsuite.junit-xml
new file mode 100644
index 0000000..a67a292
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-in-testsuite.junit-xml
@@ -0,0 +1,16 @@
diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.results b/python/test/files/junit-xml/testsuite-in-testsuite.results
new file mode 100644
index 0000000..4e029c4
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-in-testsuite.results
@@ -0,0 +1,115 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=4,
+ suite_tests=5,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=4,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite1',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite2.1',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite2',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='Project Test Suite',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-in-testsuite.xml',
+ test_file='/somepath',
+ line=34,
+ class_name='someName',
+ test_name='TestCase1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.32159
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-in-testsuite.xml',
+ test_file='/somepath',
+ line=65,
+ class_name='someName',
+ test_name='TestCase2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.321319
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-in-testsuite.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase3',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.08817
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-in-testsuite.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase4',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.98817
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-in-testsuite.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase5',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.08817
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/testsuite-in-testsuite.xml b/python/test/files/junit-xml/testsuite-in-testsuite.xml
new file mode 100644
index 0000000..a5c4b69
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-in-testsuite.xml
@@ -0,0 +1,16 @@
diff --git a/python/test/files/junit-xml/testsuite-root.annotations b/python/test/files/junit-xml/testsuite-root.annotations
new file mode 100644
index 0000000..7c5453e
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-root.annotations
@@ -0,0 +1,42 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 5 tests pass in 4s',
+ 'summary':
+ '5 tests\u2002\u2003\u20035 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '4s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '4 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMywqAIBQFf'
+ '0VctyioTT8TYkqXfMRVV9G/Z6aluzNzYE4qQQlHZzJ0hLoAPsEYYQ3IPFiTMR7+uaayFx'
+ 'c4b8UORxT9JyQD1QiBaDEbDKb0nlnnXv5riatY4rrFrdbgI+RF3MbodQOdcxe63QAAAA='
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 5 tests, see "Raw output" for the full list of tests.',
+ 'title': '5 tests found',
+ 'raw_details':
+ 'someName ‑ TestCase1\nsomeName ‑ TestCase2\nsomeName ‑ TestCase3\n'
+ 'someName ‑ TestCase4\nsomeName ‑ TestCase5'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/testsuite-root.junit-xml b/python/test/files/junit-xml/testsuite-root.junit-xml
new file mode 100644
index 0000000..b1ab523
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-root.junit-xml
@@ -0,0 +1,14 @@
diff --git a/python/test/files/junit-xml/testsuite-root.results b/python/test/files/junit-xml/testsuite-root.results
new file mode 100644
index 0000000..a259c63
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-root.results
@@ -0,0 +1,115 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=4,
+ suite_tests=5,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=4,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite1',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite2.1',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='TestSuite2',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='Project Test Suite',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-root.xml',
+ test_file='/somepath',
+ line=34,
+ class_name='someName',
+ test_name='TestCase1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.32159
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-root.xml',
+ test_file='/somepath',
+ line=65,
+ class_name='someName',
+ test_name='TestCase2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.321319
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-root.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase3',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.08817
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-root.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase4',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.98817
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='testsuite-root.xml',
+ test_file='/somepath',
+ line=40,
+ class_name='someName',
+ test_name='TestCase5',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.08817
+ )
+ ]
+)
\ No newline at end of file
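Note the suite order in the expectation above: `TestSuite1`, `TestSuite2.1`, `TestSuite2`, then the root `Project Test Suite`. That is consistent with a depth-first traversal that emits children before their parents. A minimal sketch of such a flattening, assuming nested suites are stored under a `suites` key (the parser's actual representation may differ):

```python
from typing import Iterator

def flatten(suite: dict) -> Iterator[dict]:
    """Yield nested test suites depth-first, children before parents."""
    for child in suite.get("suites", []):
        yield from flatten(child)
    yield suite

# hypothetical nesting matching the suite names in testsuite-root.results
root = {"name": "Project Test Suite", "suites": [
    {"name": "TestSuite1", "suites": []},
    {"name": "TestSuite2", "suites": [{"name": "TestSuite2.1", "suites": []}]},
]}
assert [s["name"] for s in flatten(root)] == [
    "TestSuite1", "TestSuite2.1", "TestSuite2", "Project Test Suite"]
```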
diff --git a/python/test/files/junit-xml/testsuite-root.xml b/python/test/files/junit-xml/testsuite-root.xml
new file mode 100644
index 0000000..b3beb1f
--- /dev/null
+++ b/python/test/files/junit-xml/testsuite-root.xml
@@ -0,0 +1,14 @@
+<!-- source JUnit XML with a root <testsuite> element; parsed content in testsuite-root.results above; markup not recoverable -->
diff --git a/python/test/files/junit-xml/tst/disabled.annotations b/python/test/files/junit-xml/tst/disabled.annotations
new file mode 100644
index 0000000..3f1f6ba
--- /dev/null
+++ b/python/test/files/junit-xml/tst/disabled.annotations
@@ -0,0 +1,294 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 19 fail, 5 skipped, 6 pass in 0s',
+ 'summary':
+ '\u205f\u20041 files\u2004\u2003\u205f\u20042 suites\u2004\u2003\u2002'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '31 tests\u2003\u205f\u20046 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '5 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '19 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n31 runs\u2006\u2003'
+ '11 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '19 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NQQqAIBBFr'
+ 'yKuW2RRUJcJsaIhyxh1Fd29sdLczXsf3px8Bj1Z3jNRMG49uAcqgtGjdGB2wpKQBhemWk'
+ 'QYrFeKTPuLFQ4STRKzBB3aXTITosHvHfo9FcMdg+IXb7CMnPcekeeU2TZwBN/F7CL5dQP'
+ 'prhoZ4gAAAA==\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'factorial_of_value_from_fixture failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/main.cpp:72: error: '
+ 'check_eq(3628800, 3628801)'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'positive_arguments_must_produce_expected_result failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/main.cpp:45: error: check_ne(6, '
+ '6)hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'test_which_throws_unknown_exception with error',
+ 'raw_details': 'uncaught (anonymous namespace)::some_unknown_exception'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'positive_arguments_must_produce_expected_result[2] failed',
+ 'raw_details': '/home/ivan/prj/tst/tests/failed/main.cpp:85: error: check(false)'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'factorial_of_value_from_fixture[0] failed',
+ 'raw_details': '/home/ivan/prj/tst/tests/failed/main.cpp:109: error: expected 2'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'test_which_fails_check_eq_with_custom_message failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/main.cpp:62: error: check_eq(6, '
+ '7)hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_ge_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:59: error: check_ge(2, '
+ '3)failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_ge failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:55: error: check_ge(2, '
+ '3)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_gt_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:43: error: check_gt(2, '
+ '2)failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_lt_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:35: error: check_lt(2, '
+ '2)failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_print failed',
+ 'raw_details': '/home/ivan/prj/tst/tests/failed/checks.cpp:11: error: failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_gt failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:39: error: check_gt(2, '
+ '2)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check failed',
+ 'raw_details': '/home/ivan/prj/tst/tests/failed/checks.cpp:7: error: Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_le_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:51: error: check_le(2, '
+ '1)failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_eq failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:15: error: check_eq(1, '
+ '2)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_eq_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:19: error: check_eq(1, '
+ '2)failed!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_le failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:47: error: check_le(2, '
+ '1)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_ne failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:23: error: check_ne(2, '
+ '2)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_lt failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:31: error: check_lt(2, '
+ '2)Hello world!'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tst/disabled.xml\u2003[took 0s]',
+ 'title': 'check_ne_print failed',
+ 'raw_details':
+ '/home/ivan/prj/tst/tests/failed/checks.cpp:27: error: check_ne(2, '
+ '2)failed!'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 5 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '5 skipped tests found',
+ 'raw_details':
+ 'disabled_param_test[0]\ndisabled_param_test[1]\n'
+ 'disabled_param_test[2]\ndisabled_param_test[3]\ndisabled_test'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 31 tests, see "Raw output" for the full list of tests.',
+ 'title': '31 tests found',
+ 'raw_details':
+ 'check\ncheck_eq\ncheck_eq_print\ncheck_ge\ncheck_ge_print\n'
+ 'check_gt\ncheck_gt_print\ncheck_le\ncheck_le_print\ncheck_lt\n'
+ 'check_lt_print\ncheck_ne\ncheck_ne_print\ncheck_print\n'
+ 'disabled_param_test[0]\ndisabled_param_test[1]\n'
+ 'disabled_param_test[2]\ndisabled_param_test[3]\ndisabled_test\n'
+ 'factorial_of_value_from_fixture\n'
+ 'factorial_of_value_from_fixture[0]\n'
+ 'factorial_of_value_from_fixture[1]\n'
+ 'factorial_of_value_from_fixture[2]\n'
+ 'factorial_of_value_from_fixture[3]\n'
+ 'positive_arguments_must_produce_expected_result\n'
+ 'positive_arguments_must_produce_expected_result[0]\n'
+ 'positive_arguments_must_produce_expected_result[1]\n'
+ 'positive_arguments_must_produce_expected_result[2]\n'
+ 'positive_arguments_must_produce_expected_result[3]\n'
+ 'test_which_fails_check_eq_with_custom_message\n'
+ 'test_which_throws_unknown_exception'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
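Across these expectations, failed test cases surface as `warning` annotations, hard test errors as `failure`, and the "tests found" / "skipped tests found" lists as `notice`. A minimal sketch of that mapping as observed in the fixtures (not necessarily the action's exact implementation):

```python
def annotation_level(result: str) -> str:
    """Map a test case result to a GitHub check annotation level."""
    # failed assertions become warnings, hard errors become failures,
    # informational summaries ("N tests found") become notices
    return {"failure": "warning", "error": "failure"}.get(result, "notice")
```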
diff --git a/python/test/files/junit-xml/tst/disabled.junit-xml b/python/test/files/junit-xml/tst/disabled.junit-xml
new file mode 100644
index 0000000..f9bd94d
--- /dev/null
+++ b/python/test/files/junit-xml/tst/disabled.junit-xml
@@ -0,0 +1,78 @@
+<!-- JUnit XML for the 'factorial' and 'failing_checks' suites (31 test cases, incl. disabled ones) parsed in tst/disabled.results below; markup not recoverable -->
diff --git a/python/test/files/junit-xml/tst/disabled.results b/python/test/files/junit-xml/tst/disabled.results
new file mode 100644
index 0000000..f65756b
--- /dev/null
+++ b/python/test/files/junit-xml/tst/disabled.results
@@ -0,0 +1,450 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=2,
+ suite_tests=31,
+ suite_skipped=0,
+ suite_failures=19,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='factorial',
+ tests=17,
+ skipped=0,
+ failures=5,
+ errors=1,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='failing_checks',
+ tests=14,
+ skipped=0,
+ failures=14,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='positive_arguments_must_produce_expected_result[0]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='factorial_of_value_from_fixture',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/main.cpp:72: error: '
+ 'check_eq(3628800, 3628801)',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='factorial_of_value_from_fixture[3]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='factorial_of_value_from_fixture[2]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='disabled_test',
+ result='disabled',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='positive_arguments_must_produce_expected_result',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/main.cpp:45: error: check_ne(6, '
+ '6)hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='test_which_throws_unknown_exception',
+ result='error',
+ message='uncaught (anonymous namespace)::some_unknown_exception',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='positive_arguments_must_produce_expected_result[2]',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/main.cpp:85: error: check(false)',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='positive_arguments_must_produce_expected_result[3]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='factorial_of_value_from_fixture[0]',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/main.cpp:109: error: expected 2',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='disabled_param_test[0]',
+ result='disabled',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='disabled_param_test[1]',
+ result='disabled',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='disabled_param_test[2]',
+ result='disabled',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='test_which_fails_check_eq_with_custom_message',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/main.cpp:62: error: check_eq(6, '
+ '7)hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='disabled_param_test[3]',
+ result='disabled',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='positive_arguments_must_produce_expected_result[1]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='factorial_of_value_from_fixture[1]',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_ge_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:59: error: check_ge(2, '
+ '3)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_ge',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:55: error: check_ge(2, '
+ '3)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_gt_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:43: error: check_gt(2, '
+ '2)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_lt_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:35: error: check_lt(2, '
+ '2)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:11: error: failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_gt',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:39: error: check_gt(2, '
+ '2)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:7: error: Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_le_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:51: error: check_le(2, '
+ '1)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_eq',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:15: error: check_eq(1, '
+ '2)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_eq_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:19: error: check_eq(1, '
+ '2)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_le',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:47: error: check_le(2, '
+ '1)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_ne',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:23: error: check_ne(2, '
+ '2)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_lt',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:31: error: check_lt(2, '
+ '2)Hello world!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tst/disabled.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='check_ne_print',
+ result='failure',
+ message='/home/ivan/prj/tst/tests/failed/checks.cpp:27: error: check_ne(2, '
+ '2)failed!',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/tst/disabled.xml b/python/test/files/junit-xml/tst/disabled.xml
new file mode 100644
index 0000000..a724e3e
--- /dev/null
+++ b/python/test/files/junit-xml/tst/disabled.xml
@@ -0,0 +1,78 @@
+<!-- source XML for the 31 test cases (incl. disabled_test and disabled_param_test[0..3]) parsed in tst/disabled.results above; markup not recoverable -->
\ No newline at end of file
diff --git a/python/test/files/junit-xml/unsupported-unicode.annotations b/python/test/files/junit-xml/unsupported-unicode.annotations
new file mode 100644
index 0000000..fa0a557
--- /dev/null
+++ b/python/test/files/junit-xml/unsupported-unicode.annotations
@@ -0,0 +1,99 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '2 errors, 2 fail, 2 skipped, 1 pass in 8s',
+ 'summary':
+ '7 tests\u2002\u2003\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '8s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20032 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20032 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20032 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr'
+ '0KYXXTReBlDUGIjgikwGe9uQVDc/ntt3skV6MXxkbUN4y6Af2EOKDxYQzgQ0sHHU1/25I'
+ 'KU+TeLDQ4S3SuUAP0TC6LFbDCY0ouzzj381RJXscR1S9p9B0+QF3Or4NcNSlhwMN0AAAA'
+ '=\n',
+ 'annotations': [
+ {
+ 'path': 'test/test-4.py',
+ 'start_line': 4,
+ 'end_line': 4,
+ 'annotation_level': 'warning',
+ 'message': 'unsupported-unicode.xml\u2003[took 1s]',
+ 'title': 'test 4 failed',
+ 'raw_details':
+ 'Some unsupported unicode characters: '
+ '헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓\nfailed'
+ },
+ {
+ 'path': 'test/test-5.py',
+ 'start_line': 5,
+ 'end_line': 5,
+ 'annotation_level': 'warning',
+ 'message': 'unsupported-unicode.xml\u2003[took 1s]',
+ 'title': 'test 5 failed',
+ 'raw_details':
+ 'message\nSome unsupported unicode characters: '
+ '헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓'
+ },
+ {
+ 'path': 'test/test-6.py',
+ 'start_line': 6,
+ 'end_line': 6,
+ 'annotation_level': 'failure',
+ 'message': 'unsupported-unicode.xml\u2003[took 1s]',
+ 'title': 'test 6 with error',
+ 'raw_details':
+ 'Some unsupported unicode characters: '
+ '헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓\nerror'
+ },
+ {
+ 'path': 'test/test-7.py',
+ 'start_line': 7,
+ 'end_line': 7,
+ 'annotation_level': 'failure',
+ 'message': 'unsupported-unicode.xml\u2003[took 1s]',
+ 'title': 'test 7 with error',
+ 'raw_details':
+ 'message\nSome unsupported unicode characters: '
+ '헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details': 'test 2\ntest 3'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 7 tests, see "Raw output" for the full list of tests.',
+ 'title': '7 tests found',
+ 'raw_details':
+ 'test 1 헴䜝헱홐㣇㿷䔭\\U0001237a\\U000214ff\\U00020109㦓\ntest 2\ntest 3\n'
+ 'test 4\ntest 5\ntest 6\ntest 7'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
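In the expectation above, characters inside the Basic Multilingual Plane (such as 헴 or 㦓) pass through unchanged, while astral characters are rendered as `\U0001237a`-style escape sequences. A minimal sketch reproducing that behaviour, assuming a plain code-point cutoff at U+FFFF:

```python
def escape_astral(text: str) -> str:
    """Escape characters outside the BMP as literal \\UXXXXXXXX sequences."""
    return "".join(
        ch if ord(ch) <= 0xFFFF else "\\U%08x" % ord(ch)
        for ch in text
    )

# '𒍺' (U+1237A) becomes the literal text '\U0001237a'; BMP chars are kept
assert escape_astral("䔭𒍺") == "䔭\\U0001237a"
```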
diff --git a/python/test/files/junit-xml/unsupported-unicode.junit-xml b/python/test/files/junit-xml/unsupported-unicode.junit-xml
new file mode 100644
index 0000000..d05c2df
--- /dev/null
+++ b/python/test/files/junit-xml/unsupported-unicode.junit-xml
@@ -0,0 +1,30 @@
+<!-- JUnit XML markup not recoverable; surviving element text below, parsed content in unsupported-unicode.results -->
+ skipped
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
+ failed
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
+ error
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
diff --git a/python/test/files/junit-xml/unsupported-unicode.results b/python/test/files/junit-xml/unsupported-unicode.results
new file mode 100644
index 0000000..98e49b6
--- /dev/null
+++ b/python/test/files/junit-xml/unsupported-unicode.results
@@ -0,0 +1,114 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=7,
+ suite_skipped=2,
+ suite_failures=2,
+ suite_errors=2,
+ suite_time=8,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='pytest',
+ tests=7,
+ skipped=2,
+ failures=2,
+ errors=2,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-1.py',
+ line=1,
+ class_name=None,
+ test_name='test 1 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-2.py',
+ line=2,
+ class_name=None,
+ test_name='test 2',
+ result='skipped',
+ message='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓',
+ content='skipped\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-3.py',
+ line=3,
+ class_name=None,
+ test_name='test 3',
+ result='skipped',
+ message='message',
+ content='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-4.py',
+ line=4,
+ class_name=None,
+ test_name='test 4',
+ result='failure',
+ message='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓',
+ content='failed\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-5.py',
+ line=5,
+ class_name=None,
+ test_name='test 5',
+ result='failure',
+ message='message',
+ content='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-6.py',
+ line=6,
+ class_name=None,
+ test_name='test 6',
+ result='error',
+ message='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓',
+ content='error\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='unsupported-unicode.xml',
+ test_file='test/test-7.py',
+ line=7,
+ class_name=None,
+ test_name='test 7',
+ result='error',
+ message='message',
+ content='Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓\n ',
+ stdout=None,
+ stderr=None,
+ time=1.23
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/unsupported-unicode.xml b/python/test/files/junit-xml/unsupported-unicode.xml
new file mode 100644
index 0000000..cec4e5a
--- /dev/null
+++ b/python/test/files/junit-xml/unsupported-unicode.xml
@@ -0,0 +1,30 @@
+<!-- source XML markup not recoverable; surviving element text below, parsed content in unsupported-unicode.results above -->
+ skipped
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
+ failed
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
+ error
+ Some unsupported unicode characters: 헴䜝헱홐㣇㿷䔭𒍺𡓿𠄉㦓
diff --git a/python/test/files/junit-xml/with-xml-entities.annotations b/python/test/files/junit-xml/with-xml-entities.annotations
new file mode 100644
index 0000000..b8a6577
--- /dev/null
+++ b/python/test/files/junit-xml/with-xml-entities.annotations
@@ -0,0 +1,76 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 2 skipped in 0s',
+ 'summary':
+ '4 tests\u2002\u2003\u20030 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20032 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ5AMBBFr'
+ '9J0bYFYuYw0RUyUyky7Enc3qmV2/72fvFPP4CbSvWoqpSlC+GCMaAL4nbFm5CM8V1f2QN'
+ 'FaeQ60wsGi/cRswOXaKyZEj9lg3EvvmTL38l9LLGKJZcv6bYPAkJeixejrBpBXIV3dAAA'
+ 'A\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'with-xml-entities.xml\u2003[took 0s]',
+ 'title': "Test with 'apostrophe' in the test name failed",
+ 'raw_details': "A message with 'apostrophes'\nContent with 'apostrophes'"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'with-xml-entities.xml\u2003[took 0s]',
+ 'title': 'Test with & in the test name with error',
+ 'raw_details': 'A message with &\nContent with &'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details':
+ 'Test with "quotes" in the test name\nTest with < and > in the test '
+ 'name'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 4 tests, see "Raw output" for the full list of tests.',
+ 'title': '4 tests found',
+ 'raw_details':
+ 'Test with "quotes" in the test name\nTest with & in the test name\n'
+ 'Test with \'apostrophe\' in the test name\nTest with < and > in '
+ 'the test name'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/with-xml-entities.junit-xml b/python/test/files/junit-xml/with-xml-entities.junit-xml
new file mode 100644
index 0000000..0a38ad9
--- /dev/null
+++ b/python/test/files/junit-xml/with-xml-entities.junit-xml
@@ -0,0 +1,17 @@
+<!-- JUnit XML markup not recoverable; surviving element text below, parsed content in with-xml-entities.results below -->
+ Content with "quotes"
+ Content with 'apostrophes'
+ Content with &
+ Content with < and >
diff --git a/python/test/files/junit-xml/with-xml-entities.results b/python/test/files/junit-xml/with-xml-entities.results
new file mode 100644
index 0000000..1c8006d
--- /dev/null
+++ b/python/test/files/junit-xml/with-xml-entities.results
@@ -0,0 +1,75 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=4,
+ suite_skipped=2,
+ suite_failures=1,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name=None,
+ tests=4,
+ skipped=2,
+ failures=1,
+ errors=1,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='with-xml-entities.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='Test with "quotes" in the test name',
+ result='skipped',
+ message='A message with "quotes"',
+ content='Content with "quotes"',
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='with-xml-entities.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name="Test with 'apostrophe' in the test name",
+ result='failure',
+ message="A message with 'apostrophes'",
+ content="Content with 'apostrophes'",
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='with-xml-entities.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='Test with & in the test name',
+ result='error',
+ message='A message with &',
+ content='Content with &',
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='with-xml-entities.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='Test with < and > in the test name',
+ result='skipped',
+ message='A message with < and >',
+ content='Content with < and >',
+ stdout=None,
+ stderr=None,
+ time=0.0
+ )
+ ]
+)
\ No newline at end of file
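The `with-xml-entities` pair exercises XML entity handling: quotes, apostrophes, ampersands, and angle brackets in test names, messages, and content all arrive decoded in the parsed results. Any conformant XML parser does this automatically; a quick illustration with the standard library:

```python
import xml.etree.ElementTree as ET

# entities in attribute values are decoded by the parser itself
snippet = '<testcase name="Test with &quot;quotes&quot; in the test name"/>'
case = ET.fromstring(snippet)
assert case.get("name") == 'Test with "quotes" in the test name'
```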
diff --git a/python/test/files/junit-xml/with-xml-entities.xml b/python/test/files/junit-xml/with-xml-entities.xml
new file mode 100644
index 0000000..63f8c6d
--- /dev/null
+++ b/python/test/files/junit-xml/with-xml-entities.xml
@@ -0,0 +1,17 @@
+<!-- source XML markup not recoverable; surviving element text below, parsed content in with-xml-entities.results above -->
+ Content with "quotes"
+ Content with 'apostrophes'
+ Content with &
+ Content with < and >
\ No newline at end of file
diff --git a/python/test/files/junit-xml/xunit/xunit.annotations b/python/test/files/junit-xml/xunit/xunit.annotations
new file mode 100644
index 0000000..82a3ca7
--- /dev/null
+++ b/python/test/files/junit-xml/xunit/xunit.annotations
@@ -0,0 +1,41 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 2 tests pass in 0s',
+ 'summary':
+ '2 tests\u2002\u2003\u20032 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBBEr'
+ '0KoLdTSyxCCEDfyMQtUxrsLCgrdvJnJO6kCLT1dyDQQ6iOED9aIPICzCceEaQh5mmtmPg'
+ 'rRFzsc7ZspDrorJKLD0mC01Zdjq3v5tz3cyB5uXcIZAyFBScRvnF43yWbLod0AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 2 tests, see "Raw output" for the full list of tests.',
+ 'title': '2 tests found',
+ 'raw_details':
+ 'mytestapp.Tests.AttriubteTests.GetTestNoFeature\n'
+ 'mytestapp.Tests.AttriubteTests.SetTestNoFeature'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/junit-xml/xunit/xunit.junit-xml b/python/test/files/junit-xml/xunit/xunit.junit-xml
new file mode 100644
index 0000000..4cbcee2
--- /dev/null
+++ b/python/test/files/junit-xml/xunit/xunit.junit-xml
@@ -0,0 +1,15 @@
+<!-- JUnit XML for the 'Rhino Collection' suite (2 passing test cases) parsed in xunit/xunit.results below; markup not recoverable -->
diff --git a/python/test/files/junit-xml/xunit/xunit.results b/python/test/files/junit-xml/xunit/xunit.results
new file mode 100644
index 0000000..9088d0c
--- /dev/null
+++ b/python/test/files/junit-xml/xunit/xunit.results
@@ -0,0 +1,49 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=2,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='Rhino Collection',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='xunit/xunit.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='mytestapp.Tests.AttriubteTests.SetTestNoFeature',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.4540354
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='xunit/xunit.xml',
+ test_file=None,
+ line=None,
+ class_name=None,
+ test_name='mytestapp.Tests.AttriubteTests.GetTestNoFeature',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0039778
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/junit-xml/xunit/xunit.xml b/python/test/files/junit-xml/xunit/xunit.xml
new file mode 100644
index 0000000..cf8c6a0
--- /dev/null
+++ b/python/test/files/junit-xml/xunit/xunit.xml
@@ -0,0 +1,15 @@
+<!-- source xunit XML for the 2 test cases parsed in xunit/xunit.results above; markup not recoverable -->
diff --git a/python/test/files/mocha/tests.annotations b/python/test/files/mocha/tests.annotations
new file mode 100644
index 0000000..4f47301
--- /dev/null
+++ b/python/test/files/mocha/tests.annotations
@@ -0,0 +1,114 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 1 skipped, 2 pass in 12s',
+ 'summary':
+ '5 tests\u2002\u2003\u20032 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '12s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20031 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMOw6AIBAFr'
+ '0KobSSx8TKGoMaNfMwClfHu8hXs3sxm56Y7yM3SmYwDodaD+2D1yB0YHZEFDhcXb1Pdi/'
+ 'VCBMGaOOEq31nsHORPbIgGi0Gvay/OPpe51RJ3scR9SxilwAUoi9iD0+cFI3viF94AAAA'
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '/home/runner/work/mocha/mocha/test/unit/runner.spec.js',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'tests.json',
+ 'title':
+ 'Runner instance method grep() should update the runner.total with '
+ 'number of matched tests failed',
+ 'raw_details':
+ 'Required\nError: Required at Context. '
+ '(test/unit/runner.spec.js:43:15) at callFn '
+ '(lib/runnable.js:366:21) at Test.Runnable.run '
+ '(lib/runnable.js:354:5) at Runner.runTest '
+ '(lib/runner.js:666:10) at '
+ '/home/runner/work/mocha/mocha/lib/runner.js:789:12 at next '
+ '(lib/runner.js:581:14) at '
+ '/home/runner/work/mocha/mocha/lib/runner.js:591:7 at next '
+ '(lib/runner.js:474:14) at Immediate._onImmediate '
+ '(lib/runner.js:559:5) at processImmediate '
+ '(internal/timers.js:464:21)'
+ },
+ {
+ 'path': '/home/runner/work/mocha/mocha/test/unit/test.spec.js',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'tests.json\u2003[took 4s]',
+ 'title': 'Test .clone() should copy the title with error',
+ 'raw_details':
+ "[31m[1mexpected[22m[39m [36m'To be cloned'[39m [31m[1mto "
+ "be[22m[39m [36m'Not to be cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m\nUnexpectedError\n[31m[1mexpected[22m[39m [36m'To be "
+ "cloned'[39m [31m[1mto be[22m[39m [36m'Not to be "
+ "cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m\nUnexpectedError: [31m[1mexpected[22m[39m [36m'To be "
+ "cloned'[39m [31m[1mto be[22m[39m [36m'Not to be "
+ "cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m at Context. (test/unit/test.spec.js:26:7) "
+ " at callFn (lib/runnable.js:366:21) at Test.Runnable.run "
+ "(lib/runnable.js:354:5) at Runner.runTest "
+ "(lib/runner.js:666:10) at "
+ "/home/runner/work/mocha/mocha/lib/runner.js:789:12 at next "
+ "(lib/runner.js:581:14) at "
+ "/home/runner/work/mocha/mocha/lib/runner.js:591:7 at next "
+ "(lib/runner.js:474:14) at cbHookRun (lib/runner.js:539:7) at "
+ "done (lib/runnable.js:310:5) at callFn (lib/runnable.js:389:7) "
+ " at Hook.Runnable.run (lib/runnable.js:354:5) at next "
+ "(lib/runner.js:498:10) at Immediate._onImmediate "
+ "(lib/runner.js:559:5) at processImmediate "
+ "(internal/timers.js:464:21) set UNEXPECTED_FULL_TRACE=true to "
+ "see the full stack trace"
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'Mocha instance method run() should initialize the stats collector'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 5 tests, see "Raw output" for the full list of tests.',
+ 'title': '5 tests found',
+ 'raw_details':
+ 'Context Siblings sequestered sibling should work\nContext nested '
+ 'should work\nMocha instance method run() should initialize the '
+ 'stats collector\nRunner instance method grep() should update the '
+ 'runner.total with number of matched tests\nTest .clone() should '
+ 'copy the title'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/mocha/tests.json b/python/test/files/mocha/tests.json
new file mode 100644
index 0000000..3435f7e
--- /dev/null
+++ b/python/test/files/mocha/tests.json
@@ -0,0 +1,140 @@
+{
+ "stats": {
+ "suites": 3,
+ "tests": 5,
+ "passes": 2,
+ "pending": 1,
+ "failures": 2,
+ "start": "2023-01-14T21:18:12.420Z",
+ "end": "2023-01-14T21:18:16.042Z",
+ "duration": 12
+ },
+ "tests": [
+ {
+ "title": "should work",
+ "fullTitle": "Context nested should work",
+ "file": "/home/runner/work/mocha/mocha/test/unit/context.spec.js",
+ "duration": 3,
+ "currentRetry": 0,
+ "speed": "fast",
+ "err": {}
+ },
+ {
+ "title": "should work",
+ "fullTitle": "Context Siblings sequestered sibling should work",
+ "file": "/home/runner/work/mocha/mocha/test/unit/context.spec.js",
+ "duration": 1,
+ "currentRetry": 0,
+ "speed": "fast",
+ "err": {}
+ },
+ {
+ "title": "should initialize the stats collector",
+ "fullTitle": "Mocha instance method run() should initialize the stats collector",
+ "file": "/home/runner/work/mocha/mocha/test/unit/mocha.spec.js",
+ "currentRetry": 0,
+ "err": {}
+ },
+ {
+ "title": "should update the runner.total with number of matched tests",
+ "fullTitle": "Runner instance method grep() should update the runner.total with number of matched tests",
+ "file": "/home/runner/work/mocha/mocha/test/unit/runner.spec.js",
+ "duration": 0,
+ "currentRetry": 0,
+ "err": {
+ "stack": "Error: Required\n at Context. (test/unit/runner.spec.js:43:15)\n at callFn (lib/runnable.js:366:21)\n at Test.Runnable.run (lib/runnable.js:354:5)\n at Runner.runTest (lib/runner.js:666:10)\n at /home/runner/work/mocha/mocha/lib/runner.js:789:12\n at next (lib/runner.js:581:14)\n at /home/runner/work/mocha/mocha/lib/runner.js:591:7\n at next (lib/runner.js:474:14)\n at Immediate._onImmediate (lib/runner.js:559:5)\n at processImmediate (internal/timers.js:464:21)",
+ "message": "Required"
+ }
+ },
+ {
+ "title": "should copy the title",
+ "fullTitle": "Test .clone() should copy the title",
+ "file": "/home/runner/work/mocha/mocha/test/unit/test.spec.js",
+ "duration": 4,
+ "currentRetry": 0,
+ "err": {
+ "errorMode": "default",
+ "stack": "UnexpectedError: \n\u001b[31m\u001b[1mexpected\u001b[22m\u001b[39m \u001b[36m'To be cloned'\u001b[39m \u001b[31m\u001b[1mto be\u001b[22m\u001b[39m \u001b[36m'Not to be cloned'\u001b[39m\n\n\u001b[41m\u001b[30mTo\u001b[39m\u001b[49m\u001b[31m be cloned\u001b[39m\n\u001b[42m\u001b[30mNot\u001b[39m\u001b[49m\u001b[32m \u001b[39m\u001b[42m\u001b[30mto \u001b[39m\u001b[49m\u001b[32mbe cloned\u001b[39m\n\n at Context. (test/unit/test.spec.js:26:7)\n at callFn (lib/runnable.js:366:21)\n at Test.Runnable.run (lib/runnable.js:354:5)\n at Runner.runTest (lib/runner.js:666:10)\n at /home/runner/work/mocha/mocha/lib/runner.js:789:12\n at next (lib/runner.js:581:14)\n at /home/runner/work/mocha/mocha/lib/runner.js:591:7\n at next (lib/runner.js:474:14)\n at cbHookRun (lib/runner.js:539:7)\n at done (lib/runnable.js:310:5)\n at callFn (lib/runnable.js:389:7)\n at Hook.Runnable.run (lib/runnable.js:354:5)\n at next (lib/runner.js:498:10)\n at Immediate._onImmediate (lib/runner.js:559:5)\n at processImmediate (internal/timers.js:464:21)\n set UNEXPECTED_FULL_TRACE=true to see the full stack trace",
+ "parent": {
+ "errorMode": "default",
+ "parent": {
+ "errorMode": "default",
+ "parent": null,
+ "name": "UnexpectedError",
+ "label": "should equal"
+ },
+ "name": "UnexpectedError"
+ },
+ "name": "UnexpectedError",
+ "message": "\n\u001b[31m\u001b[1mexpected\u001b[22m\u001b[39m \u001b[36m'To be cloned'\u001b[39m \u001b[31m\u001b[1mto be\u001b[22m\u001b[39m \u001b[36m'Not to be cloned'\u001b[39m\n\n\u001b[41m\u001b[30mTo\u001b[39m\u001b[49m\u001b[31m be cloned\u001b[39m\n\u001b[42m\u001b[30mNot\u001b[39m\u001b[49m\u001b[32m \u001b[39m\u001b[42m\u001b[30mto \u001b[39m\u001b[49m\u001b[32mbe cloned\u001b[39m\n",
+ "_hasSerializedErrorMessage": true
+ }
+ }
+ ],
+ "pending": [
+ {
+ "title": "should initialize the stats collector",
+ "fullTitle": "Mocha instance method run() should initialize the stats collector",
+ "file": "/home/runner/work/mocha/mocha/test/unit/mocha.spec.js",
+ "currentRetry": 0,
+ "err": {}
+ }
+ ],
+ "failures": [
+ {
+ "title": "should update the runner.total with number of matched tests",
+ "fullTitle": "Runner instance method grep() should update the runner.total with number of matched tests",
+ "file": "/home/runner/work/mocha/mocha/test/unit/runner.spec.js",
+ "duration": 0,
+ "currentRetry": 0,
+ "err": {
+ "stack": "Error: Required\n at Context. (test/unit/runner.spec.js:43:15)\n at callFn (lib/runnable.js:366:21)\n at Test.Runnable.run (lib/runnable.js:354:5)\n at Runner.runTest (lib/runner.js:666:10)\n at /home/runner/work/mocha/mocha/lib/runner.js:789:12\n at next (lib/runner.js:581:14)\n at /home/runner/work/mocha/mocha/lib/runner.js:591:7\n at next (lib/runner.js:474:14)\n at Immediate._onImmediate (lib/runner.js:559:5)\n at processImmediate (internal/timers.js:464:21)",
+ "message": "Required"
+ }
+ },
+ {
+ "title": "should copy the title",
+ "fullTitle": "Test .clone() should copy the title",
+ "file": "/home/runner/work/mocha/mocha/test/unit/test.spec.js",
+ "duration": 4,
+ "currentRetry": 0,
+ "err": {
+ "errorMode": "default",
+ "stack": "UnexpectedError: \n\u001b[31m\u001b[1mexpected\u001b[22m\u001b[39m \u001b[36m'To be cloned'\u001b[39m \u001b[31m\u001b[1mto be\u001b[22m\u001b[39m \u001b[36m'Not to be cloned'\u001b[39m\n\n\u001b[41m\u001b[30mTo\u001b[39m\u001b[49m\u001b[31m be cloned\u001b[39m\n\u001b[42m\u001b[30mNot\u001b[39m\u001b[49m\u001b[32m \u001b[39m\u001b[42m\u001b[30mto \u001b[39m\u001b[49m\u001b[32mbe cloned\u001b[39m\n\n at Context. (test/unit/test.spec.js:26:7)\n at callFn (lib/runnable.js:366:21)\n at Test.Runnable.run (lib/runnable.js:354:5)\n at Runner.runTest (lib/runner.js:666:10)\n at /home/runner/work/mocha/mocha/lib/runner.js:789:12\n at next (lib/runner.js:581:14)\n at /home/runner/work/mocha/mocha/lib/runner.js:591:7\n at next (lib/runner.js:474:14)\n at cbHookRun (lib/runner.js:539:7)\n at done (lib/runnable.js:310:5)\n at callFn (lib/runnable.js:389:7)\n at Hook.Runnable.run (lib/runnable.js:354:5)\n at next (lib/runner.js:498:10)\n at Immediate._onImmediate (lib/runner.js:559:5)\n at processImmediate (internal/timers.js:464:21)\n set UNEXPECTED_FULL_TRACE=true to see the full stack trace",
+ "parent": {
+ "errorMode": "default",
+ "parent": {
+ "errorMode": "default",
+ "parent": null,
+ "name": "UnexpectedError",
+ "label": "should equal"
+ },
+ "name": "UnexpectedError"
+ },
+ "name": "UnexpectedError",
+ "message": "\n\u001b[31m\u001b[1mexpected\u001b[22m\u001b[39m \u001b[36m'To be cloned'\u001b[39m \u001b[31m\u001b[1mto be\u001b[22m\u001b[39m \u001b[36m'Not to be cloned'\u001b[39m\n\n\u001b[41m\u001b[30mTo\u001b[39m\u001b[49m\u001b[31m be cloned\u001b[39m\n\u001b[42m\u001b[30mNot\u001b[39m\u001b[49m\u001b[32m \u001b[39m\u001b[42m\u001b[30mto \u001b[39m\u001b[49m\u001b[32mbe cloned\u001b[39m\n",
+ "_hasSerializedErrorMessage": true
+ }
+ }
+ ],
+ "passes": [
+ {
+ "title": "should work",
+ "fullTitle": "Context nested should work",
+ "file": "/home/runner/work/mocha/mocha/test/unit/context.spec.js",
+ "duration": 3,
+ "currentRetry": 0,
+ "speed": "fast",
+ "err": {}
+ },
+ {
+ "title": "should work",
+ "fullTitle": "Context Siblings sequestered sibling should work",
+ "file": "/home/runner/work/mocha/mocha/test/unit/context.spec.js",
+ "duration": 1,
+ "currentRetry": 0,
+ "speed": "fast",
+ "err": {}
+ }
+ ]
+}
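The mocha JSON report lists each test twice: once in `tests` and again in `pending`, `failures`, or `passes`. Comparing `tests.json` with the `tests.results` expectation below, pending entries become `skipped`, passes become `success`, and a failure whose `err` carries a `name` (here `UnexpectedError`) is reported as an `error`, while a bare assertion failure stays a `failure`. A hedged sketch of that classification (illustrative only; the action's real rules may differ):

```python
def classify(test: dict, report: dict) -> str:
    """Classify a mocha JSON test entry; illustrative, not the action's code."""
    title = test["fullTitle"]
    if any(t["fullTitle"] == title for t in report["pending"]):
        return "skipped"
    if any(t["fullTitle"] == title for t in report["passes"]):
        return "success"
    # a named exception type suggests a test error, a bare assertion a failure
    return "error" if test["err"].get("name") else "failure"
```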
diff --git a/python/test/files/mocha/tests.junit-xml b/python/test/files/mocha/tests.junit-xml
new file mode 100644
index 0000000..8654609
--- /dev/null
+++ b/python/test/files/mocha/tests.junit-xml
@@ -0,0 +1,17 @@
+<!-- JUnit XML markup not recoverable; surviving CDATA text below, parsed content in tests.results -->
+ (test/unit/runner.spec.js:43:15) at callFn (lib/runnable.js:366:21) at Test.Runnable.run (lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next (lib/runner.js:581:14) at /home/runner/work/mocha/mocha/lib/runner.js:591:7 at next (lib/runner.js:474:14) at Immediate._onImmediate (lib/runner.js:559:5) at processImmediate (internal/timers.js:464:21)]]>
+ (test/unit/test.spec.js:26:7) at callFn (lib/runnable.js:366:21) at Test.Runnable.run (lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next (lib/runner.js:581:14) at /home/runner/work/mocha/mocha/lib/runner.js:591:7 at next (lib/runner.js:474:14) at cbHookRun (lib/runner.js:539:7) at done (lib/runnable.js:310:5) at callFn (lib/runnable.js:389:7) at Hook.Runnable.run (lib/runnable.js:354:5) at next (lib/runner.js:498:10) at Immediate._onImmediate (lib/runner.js:559:5) at processImmediate (internal/timers.js:464:21) set UNEXPECTED_FULL_TRACE=true to see the full stack trace]]>
diff --git a/python/test/files/mocha/tests.results b/python/test/files/mocha/tests.results
new file mode 100644
index 0000000..f60c1e3
--- /dev/null
+++ b/python/test/files/mocha/tests.results
@@ -0,0 +1,121 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=5,
+ suite_skipped=1,
+ suite_failures=1,
+ suite_errors=1,
+ suite_time=12,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name=None,
+ tests=5,
+ skipped=1,
+ failures=1,
+ errors=1,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='tests.json',
+ test_file='/home/runner/work/mocha/mocha/test/unit/context.spec.js',
+ line=None,
+ class_name=None,
+ test_name='Context nested should work',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tests.json',
+ test_file='/home/runner/work/mocha/mocha/test/unit/context.spec.js',
+ line=None,
+ class_name=None,
+ test_name='Context Siblings sequestered sibling should work',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tests.json',
+ test_file='/home/runner/work/mocha/mocha/test/unit/mocha.spec.js',
+ line=None,
+ class_name=None,
+ test_name='Mocha instance method run() should initialize the stats collector',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tests.json',
+ test_file='/home/runner/work/mocha/mocha/test/unit/runner.spec.js',
+ line=None,
+ class_name=None,
+ test_name='Runner instance method grep() should update the runner.total with '
+ 'number of matched tests',
+ result='failure',
+ message='Required',
+ content='Required\nError: Required at Context. '
+ '(test/unit/runner.spec.js:43:15) at callFn '
+ '(lib/runnable.js:366:21) at Test.Runnable.run '
+ '(lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) '
+ ' at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next '
+ '(lib/runner.js:581:14) at '
+ '/home/runner/work/mocha/mocha/lib/runner.js:591:7 at next '
+ '(lib/runner.js:474:14) at Immediate._onImmediate '
+ '(lib/runner.js:559:5) at processImmediate '
+ '(internal/timers.js:464:21)',
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='tests.json',
+ test_file='/home/runner/work/mocha/mocha/test/unit/test.spec.js',
+ line=None,
+ class_name=None,
+ test_name='Test .clone() should copy the title',
+ result='error',
+ message="[31m[1mexpected[22m[39m [36m'To be cloned'[39m [31m[1mto be[22m[39m "
+ "[36m'Not to be cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m",
+ content="UnexpectedError\n[31m[1mexpected[22m[39m [36m'To be cloned'[39m "
+ "[31m[1mto be[22m[39m [36m'Not to be "
+ "cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m\nUnexpectedError: [31m[1mexpected[22m[39m [36m'To be "
+ "cloned'[39m [31m[1mto be[22m[39m [36m'Not to be "
+ "cloned'[39m[41m[30mTo[39m[49m[31m be "
+ "cloned[39m[42m[30mNot[39m[49m[32m [39m[42m[30mto [39m[49m[32mbe "
+ "cloned[39m at Context. (test/unit/test.spec.js:26:7) "
+ " at callFn (lib/runnable.js:366:21) at Test.Runnable.run "
+ "(lib/runnable.js:354:5) at Runner.runTest (lib/runner.js:666:10) "
+ " at /home/runner/work/mocha/mocha/lib/runner.js:789:12 at next "
+ "(lib/runner.js:581:14) at "
+ "/home/runner/work/mocha/mocha/lib/runner.js:591:7 at next "
+ "(lib/runner.js:474:14) at cbHookRun (lib/runner.js:539:7) at "
+ "done (lib/runnable.js:310:5) at callFn (lib/runnable.js:389:7) "
+ "at Hook.Runnable.run (lib/runnable.js:354:5) at next "
+ "(lib/runner.js:498:10) at Immediate._onImmediate "
+ "(lib/runner.js:559:5) at processImmediate "
+ "(internal/timers.js:464:21) set UNEXPECTED_FULL_TRACE=true to see "
+ "the full stack trace",
+ stdout=None,
+ stderr=None,
+ time=4.0
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/clicketyclackety.annotations b/python/test/files/nunit/mstest/clicketyclackety.annotations
new file mode 100644
index 0000000..17c31d8
--- /dev/null
+++ b/python/test/files/nunit/mstest/clicketyclackety.annotations
@@ -0,0 +1,189 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '10 fail, 12 pass in 0s',
+ 'summary':
+ '\u205f\u20041 files\u2004\u2003\u205f\u20048 suites\u2004\u2003\u2002'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '22 tests\u200312 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '10 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n23 runs\u2006\u2003'
+ '13 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '10 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr'
+ '0JYu/CzMV7GENTYCGJKWRnvbiEo7Ppmpu+WG5jVy0l0jZA+ACUYGZaAisCdjC0jFxSrvv'
+ '9g9kHr+FklB1z1ft4UmDgpyYroMG8wnEk55Ps3lqAIE9e+FNQ67awFYsiX8LuSzwvzas/'
+ 'j4wAAAA==\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'BakeDrawings failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Assert.Equals should not be '
+ 'used. Use Assert.AreEqual instead.\n at '
+ 'NUnit.Framework.Assert.Equals(Object a, Object b)\n at '
+ 'MyProject.Tests.Real.UserInput.BakeDrawingCommandTests.BakeDrawings'
+ '()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'SilentRun failed',
+ 'raw_details':
+ 'System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.\n at '
+ 'MyProject.Tests.Real.UserInput.ProjectInitCommandTests.SilentRun()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'DiscardDrawingTests failed',
+ 'raw_details':
+ "System.IO.DirectoryNotFoundException : Could not find a part of "
+ "the path "
+ "'C:\\Users\\USER\\actions-runner\\_work\\MyProject\\MyProject\\SC\\f4a8fa46"
+ "-245d-4cd5-88c1-80fcfbda6369'.\n at "
+ "System.IO.__Error.WinIOError(Int32 errorCode, String "
+ "maybeFullPath)\n at "
+ "System.IO.FileSystemEnumerableIterator`1.CommonInit()\n at "
+ "System.IO.FileSystemEnumerableIterator`1..ctor(String path, String "
+ "originalUserPath, String searchPattern, SearchOption searchOption, "
+ "SearchResultHandler`1 resultHandler, Boolean checkHost)\n at "
+ "System.IO.Directory.GetFiles(String path)\n at "
+ "MyProject.Tests.Real.FlagTests.DiscardDrawingTests()"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'LoadDrawingsEventFlagTests failed',
+ 'raw_details':
+ ' Expected: 3\n But was: 0\n at '
+ 'MyProject.Tests.Real.FlagTests.LoadDrawingsEventFlagTests()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'ResetProjectEventFlagTests failed',
+ 'raw_details':
+ 'System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.\n at '
+ 'MyProject.Tests.Real.FlagTests.ResetProjectEventFlagTests()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'SetupLayersEventFlagTests failed',
+ 'raw_details':
+ "om.Exceptions.DocumentException : Document should be initlised, "
+ "but isn't!\n at MyProject.Runtime.Events.SetupLayers.Execute()\n "
+ " at MyProject.Tests.Real.FlagTests.SetupLayersEventFlagTests()"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'SetupPipeEventFlagTests failed',
+ 'raw_details':
+ 'System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.\n at '
+ 'MyProject.Tests.Real.FlagTests.SetupPipeEventFlagTests()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'DrawingConstants failed',
+ 'raw_details':
+ 'System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.\n at '
+ 'MyProject.Tests.Real.RuntimeTests.DrawingConstants()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'FileConstants failed',
+ 'raw_details':
+ 'System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.\n at '
+ 'MyProject.Tests.Real.RuntimeTests.FileConstants()'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/clicketyclackety.xml\u2003[took 0s]',
+ 'title': 'PluginConstants failed',
+ 'raw_details':
+ "System.MissingMethodException : Method not found: 'System.Object "
+ "MyProject.MyProjectPlugIn.get_Instance()'.\n at "
+ "MyProject.Tests.Real.RuntimeTests.PluginConstants()"
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 22 tests, see "Raw output" for the full list of tests.',
+ 'title': '22 tests found',
+ 'raw_details':
+ 'BakeDrawings\nDeleteMyProjectObjectEventFlagTests\n'
+ 'DiscardDrawingTests\nDisplayGraphicConstants\nDrawingConstants\n'
+ 'EventRegisterTests\nFileConstants\nLoadDrawingsEventFlagTests\n'
+ 'LoadedDrawings\nModifyNewObjectUniqueIdEventFlagTests\n'
+ 'MoveControlPointEventFlagTests\nObjectConstants\nPluginConstants\n'
+ 'ResetProjectEventFlagTests\nSetupLayersEventFlagTests\n'
+ 'SetupPipeEventFlagTests\nSilentRun\nTest\nUIPanelConstants\n'
+ 'UIPropertyConstants\nUpdateDrawingsPanelEventFlagTests\n'
+ 'UpdatePropertiesPanelEventFlagTests'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/clicketyclackety.junit-xml b/python/test/files/nunit/mstest/clicketyclackety.junit-xml
new file mode 100644
index 0000000..e909256
--- /dev/null
+++ b/python/test/files/nunit/mstest/clicketyclackety.junit-xml
@@ -0,0 +1,96 @@
+<!-- JUnit XML markup lost in extraction; only embedded failure text survived: -->
+ at NUnit.Framework.Assert.Equals(Object a, Object b)
+ at MyProject.Tests.Real.UserInput.BakeDrawingCommandTests.BakeDrawings()
+ at MyProject.Tests.Real.UserInput.ProjectInitCommandTests.SilentRun()
+ at System.IO.__Error.WinIOError(Int32 errorCode, String maybeFullPath)
+ at System.IO.FileSystemEnumerableIterator`1.CommonInit()
+ at System.IO.FileSystemEnumerableIterator`1..ctor(String path, String originalUserPath, String searchPattern, SearchOption searchOption, SearchResultHandler`1 resultHandler, Boolean checkHost)
+ at System.IO.Directory.GetFiles(String path)
+ at MyProject.Tests.Real.FlagTests.DiscardDrawingTests()
+ at MyProject.Tests.Real.FlagTests.LoadDrawingsEventFlagTests()
+ at MyProject.Tests.Real.FlagTests.ResetProjectEventFlagTests()
+ at MyProject.Runtime.Events.SetupLayers.Execute()
+ at MyProject.Tests.Real.FlagTests.SetupLayersEventFlagTests()
+ at MyProject.Tests.Real.FlagTests.SetupPipeEventFlagTests()
+ at MyProject.Tests.Real.RuntimeTests.DrawingConstants()
+ at MyProject.Tests.Real.RuntimeTests.FileConstants()
+ at MyProject.Tests.Real.RuntimeTests.PluginConstants()
diff --git a/python/test/files/nunit/mstest/clicketyclackety.results b/python/test/files/nunit/mstest/clicketyclackety.results
new file mode 100644
index 0000000..ebe15d2
--- /dev/null
+++ b/python/test/files/nunit/mstest/clicketyclackety.results
@@ -0,0 +1,406 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=8,
+ suite_tests=23,
+ suite_skipped=0,
+ suite_failures=10,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.UserInput.BakeDrawingCommandTests',
+ tests=1,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.UserInput.ProjectInitCommandTests',
+ tests=1,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.FlagTests',
+ tests=10,
+ skipped=0,
+ failures=5,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.NewDocumentTests',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.PipeTests',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.RuntimeTests',
+ tests=7,
+ skipped=0,
+ failures=3,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.SwitchingTests',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='MyProject.Tests.Real.Tests.RuntimeConstants',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BakeDrawings',
+ result='failure',
+ message='System.InvalidOperationException : Assert.Equals should not be used. '
+ 'Use Assert.AreEqual instead.',
+ content=' at NUnit.Framework.Assert.Equals(Object a, Object b)\n at '
+ 'MyProject.Tests.Real.UserInput.BakeDrawingCommandTests.BakeDrawings()',
+ stdout=None,
+ stderr=None,
+ time=0.135485
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='SilentRun',
+ result='failure',
+ message='System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.',
+ content=' at '
+ 'MyProject.Tests.Real.UserInput.ProjectInitCommandTests.SilentRun()',
+ stdout=None,
+ stderr=None,
+ time=0.052338
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='DeleteMyProjectObjectEventFlagTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000201
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='DiscardDrawingTests',
+ result='failure',
+ message="System.IO.DirectoryNotFoundException : Could not find a part of the "
+ "path "
+ "'C:\\Users\\USER\\actions-runner\\_work\\MyProject\\MyProject\\SC\\f4a8fa46-2"
+ "45d-4cd5-88c1-80fcfbda6369'.",
+ content=' at System.IO.__Error.WinIOError(Int32 errorCode, String '
+ 'maybeFullPath)\n at '
+ 'System.IO.FileSystemEnumerableIterator`1.CommonInit()\n at '
+ 'System.IO.FileSystemEnumerableIterator`1..ctor(String path, String '
+ 'originalUserPath, String searchPattern, SearchOption searchOption, '
+ 'SearchResultHandler`1 resultHandler, Boolean checkHost)\n at '
+ 'System.IO.Directory.GetFiles(String path)\n at '
+ 'MyProject.Tests.Real.FlagTests.DiscardDrawingTests()',
+ stdout=None,
+ stderr=None,
+ time=0.004832
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='LoadDrawingsEventFlagTests',
+ result='failure',
+ message=' Expected: 3\n But was: 0\n',
+ content=' at MyProject.Tests.Real.FlagTests.LoadDrawingsEventFlagTests()\n',
+ stdout=None,
+ stderr=None,
+ time=0.057537
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='ModifyNewObjectUniqueIdEventFlagTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000104
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='MoveControlPointEventFlagTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000112
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='ResetProjectEventFlagTests',
+ result='failure',
+ message='System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.',
+ content=' at MyProject.Tests.Real.FlagTests.ResetProjectEventFlagTests()',
+ stdout=None,
+ stderr=None,
+ time=0.025094
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='SetupLayersEventFlagTests',
+ result='failure',
+ message="om.Exceptions.DocumentException : Document should be initlised, but "
+ "isn't!",
+ content=' at MyProject.Runtime.Events.SetupLayers.Execute()\n at '
+ 'MyProject.Tests.Real.FlagTests.SetupLayersEventFlagTests()',
+ stdout=None,
+ stderr=None,
+ time=0.00231
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='SetupPipeEventFlagTests',
+ result='failure',
+ message='System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.',
+ content=' at MyProject.Tests.Real.FlagTests.SetupPipeEventFlagTests()',
+ stdout=None,
+ stderr=None,
+ time=0.233069
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UpdateDrawingsPanelEventFlagTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000363
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UpdatePropertiesPanelEventFlagTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.8e-05
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='EventRegisterTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005957
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Test',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000102
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='DisplayGraphicConstants',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=4.6e-05
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='DrawingConstants',
+ result='failure',
+ message='System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.',
+ content=' at MyProject.Tests.Real.RuntimeTests.DrawingConstants()',
+ stdout=None,
+ stderr=None,
+ time=0.001262
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='FileConstants',
+ result='failure',
+ message='System.NullReferenceException : Object reference not set to an '
+ 'instance of an object.',
+ content=' at MyProject.Tests.Real.RuntimeTests.FileConstants()',
+ stdout=None,
+ stderr=None,
+ time=0.001455
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='ObjectConstants',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.00029
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='PluginConstants',
+ result='failure',
+ message="System.MissingMethodException : Method not found: 'System.Object "
+ "MyProject.MyProjectPlugIn.get_Instance()'.",
+ content=' at MyProject.Tests.Real.RuntimeTests.PluginConstants()',
+ stdout=None,
+ stderr=None,
+ time=0.005593
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UIPanelConstants',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007398
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UIPropertyConstants',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000517
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Test',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000254
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/clicketyclackety.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='LoadedDrawings',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.000986
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/clicketyclackety.xml b/python/test/files/nunit/mstest/clicketyclackety.xml
new file mode 100644
index 0000000..bb3059b
--- /dev/null
+++ b/python/test/files/nunit/mstest/clicketyclackety.xml
@@ -0,0 +1,160 @@
+<!-- NUnit XML markup lost in extraction; no text content recoverable -->
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/pickles.annotations b/python/test/files/nunit/mstest/pickles.annotations
new file mode 100644
index 0000000..b3ee25b
--- /dev/null
+++ b/python/test/files/nunit/mstest/pickles.annotations
@@ -0,0 +1,79 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail, 3 pass in 0s',
+ 'summary':
+ '4 tests\u2002\u2003\u20033 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '2 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr'
+ '0KoLfxVXoYQxLiRj1mgMt7dFSHSzZuZvItvYHTgCxs6xkOCmGEkWBPKCN4R9oQ0xHeaax'
+ 'YhKUXF9BcHnO1bbBJMUX+FRvRYLphc9b2x1X382zI3ssytS3lrIRKUxMIu+f0AuKmg790'
+ 'AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/pickles.xml\u2003[took 0s]',
+ 'title': 'Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers failed',
+ 'raw_details':
+ '\n at '
+ 'Pickles.TestHarness.xUnit.Steps.ThenTheResultShouldBePass(Int32 '
+ 'result) in '
+ 'C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarn'
+ 'ess.NUnit\\Steps.cs:line 26\nat lambda_method(Closure , '
+ 'IContextManager , Int32 )\nat '
+ 'TechTalk.SpecFlow.Bindings.MethodBinding.InvokeAction(IContextManag'
+ 'er contextManager, Object[] arguments, ITestTracer testTracer, '
+ 'TimeSpan& duration)\nat '
+ 'TechTalk.SpecFlow.Bindings.StepDefinitionBinding.Invoke(IContextMan'
+ 'ager contextManager, ITestTracer testTracer, Object[] arguments, '
+ 'TimeSpan& duration)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStepMat'
+ 'ch(BindingMatch match, Object[] arguments)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStep(St'
+ 'epArgs stepArgs)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.OnAfterLastSte'
+ 'p()\nat TechTalk.SpecFlow.TestRunner.CollectScenarioErrors()\nat '
+ 'Pickles.TestHarness.AdditionFeature.ScenarioCleanup() in '
+ 'C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarn'
+ 'ess.NUnit\\Addition.feature.cs:line 0\nat '
+ 'Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers() in '
+ 'c:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarn'
+ 'ess.NUnit\\Addition.feature:line 18'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 4 tests, see "Raw output" for the full list of tests.',
+ 'title': '4 tests found',
+ 'raw_details':
+ 'Pickles.TestHarness.AdditionFeature.AddTwoNumbers\n'
+ 'Pickles.TestHarness.AdditionFeature.AddingSeveralNumbers("40","50",'
+ '"90",System.String[])\n'
+ 'Pickles.TestHarness.AdditionFeature.AddingSeveralNumbers("60","70",'
+ '"130",System.String[])\n'
+ 'Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/pickles.junit-xml b/python/test/files/nunit/mstest/pickles.junit-xml
new file mode 100644
index 0000000..2e954f6
--- /dev/null
+++ b/python/test/files/nunit/mstest/pickles.junit-xml
@@ -0,0 +1,53 @@
+<!-- JUnit XML markup lost in extraction; only embedded failure text survived: -->
+ at Pickles.TestHarness.xUnit.Steps.ThenTheResultShouldBePass(Int32 result) in C:\dev\pickles-results-harness\Pickles.TestHarness\Pickles.TestHarness.NUnit\Steps.cs:line 26
+at lambda_method(Closure , IContextManager , Int32 )
+at TechTalk.SpecFlow.Bindings.MethodBinding.InvokeAction(IContextManager contextManager, Object[] arguments, ITestTracer testTracer, TimeSpan& duration)
+at TechTalk.SpecFlow.Bindings.StepDefinitionBinding.Invoke(IContextManager contextManager, ITestTracer testTracer, Object[] arguments, TimeSpan& duration)
+at TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStepMatch(BindingMatch match, Object[] arguments)
+at TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStep(StepArgs stepArgs)
+at TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.OnAfterLastStep()
+at TechTalk.SpecFlow.TestRunner.CollectScenarioErrors()
+at Pickles.TestHarness.AdditionFeature.ScenarioCleanup() in C:\dev\pickles-results-harness\Pickles.TestHarness\Pickles.TestHarness.NUnit\Addition.feature.cs:line 0
+at Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers() in c:\dev\pickles-results-harness\Pickles.TestHarness\Pickles.TestHarness.NUnit\Addition.feature:line 18
diff --git a/python/test/files/nunit/mstest/pickles.results b/python/test/files/nunit/mstest/pickles.results
new file mode 100644
index 0000000..5e3f4d9
--- /dev/null
+++ b/python/test/files/nunit/mstest/pickles.results
@@ -0,0 +1,109 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=2,
+ suite_tests=4,
+ suite_skipped=0,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='Pickles.TestHarness.AddingSeveralNumbers',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='Pickles.TestHarness.AdditionFeature',
+ tests=4,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/pickles.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Pickles.TestHarness.AdditionFeature.AddingSeveralNumbers("60","70","'
+ '130",System.String[])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.137
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/pickles.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Pickles.TestHarness.AdditionFeature.AddingSeveralNumbers("40","50","'
+ '90",System.String[])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/pickles.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Pickles.TestHarness.AdditionFeature.AddTwoNumbers',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/pickles.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers',
+ result='failure',
+ message=None,
+ content='\n at '
+ 'Pickles.TestHarness.xUnit.Steps.ThenTheResultShouldBePass(Int32 '
+ 'result) in '
+ 'C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarnes'
+ 's.NUnit\\Steps.cs:line 26\nat lambda_method(Closure , '
+ 'IContextManager , Int32 )\nat '
+ 'TechTalk.SpecFlow.Bindings.MethodBinding.InvokeAction(IContextManager'
+ ' contextManager, Object[] arguments, ITestTracer testTracer, '
+ 'TimeSpan& duration)\nat '
+ 'TechTalk.SpecFlow.Bindings.StepDefinitionBinding.Invoke(IContextManag'
+ 'er contextManager, ITestTracer testTracer, Object[] arguments, '
+ 'TimeSpan& duration)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStepMatch'
+ '(BindingMatch match, Object[] arguments)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.ExecuteStep(Step'
+ 'Args stepArgs)\nat '
+ 'TechTalk.SpecFlow.Infrastructure.TestExecutionEngine.OnAfterLastStep('
+ ')\nat TechTalk.SpecFlow.TestRunner.CollectScenarioErrors()\nat '
+ 'Pickles.TestHarness.AdditionFeature.ScenarioCleanup() in '
+ 'C:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarnes'
+ 's.NUnit\\Addition.feature.cs:line 0\nat '
+ 'Pickles.TestHarness.AdditionFeature.FailToAddTwoNumbers() in '
+ 'c:\\dev\\pickles-results-harness\\Pickles.TestHarness\\Pickles.TestHarnes'
+ 's.NUnit\\Addition.feature:line 18\n\n ',
+ stdout=None,
+ stderr=None,
+ time=0.028
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/pickles.xml b/python/test/files/nunit/mstest/pickles.xml
new file mode 100644
index 0000000..8b245a7
--- /dev/null
+++ b/python/test/files/nunit/mstest/pickles.xml
@@ -0,0 +1,74 @@
+<!-- NUnit XML markup lost in extraction; no text content recoverable -->
diff --git a/python/test/files/nunit/mstest/timewarpinc.annotations b/python/test/files/nunit/mstest/timewarpinc.annotations
new file mode 100644
index 0000000..94bd876
--- /dev/null
+++ b/python/test/files/nunit/mstest/timewarpinc.annotations
@@ -0,0 +1,122 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail in 2s',
+ 'summary':
+ '1 tests\u2002\u2003\u20030 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '2s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMQQ6AIAwEv'
+ '0I4e1CPfsYQhNiIYAqcjH+3IgjednbbObkGozyf2NAx7iOED5aIIoCzhCMhDaFMKc8+Sk'
+ 'lFX4sNjl+hBZjfi0J0mE8w2uJ7Yqt7udoSN7LErUu6fYdAkBPzq+DXDXGDl7HdAAAA\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'mstest/timewarpinc.xml\u2003[took 2s]',
+ 'title': 'ValidateSceneContainer("Assets/Scenes/Grid/GridTest.unity") failed',
+ 'raw_details':
+ 'Zenject.ZenjectException : Zenject Validation Failed! See errors '
+ 'below for details.\n at '
+ 'Zenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () '
+ '[0x0009c] in '
+ '/github/workspace/Assets/ThirdParty/Zenject/Source/Editor/ZenUnityE'
+ 'ditorUtil.cs:82\n at '
+ 'MP.Tests.AssetValidatorTest.ValidateSceneContainer (System.String '
+ 'scenePath) [0x00009] in '
+ '/github/workspace/Assets/Tests/EditorMode/AssetValidatorTest.cs:58\n'
+ ' at (wrapper managed-to-native) '
+ 'System.Reflection.RuntimeMethodInfo.InternalInvoke(System.Reflectio'
+ 'n.RuntimeMethodInfo,object,object[],System.Exception&)\n at '
+ 'System.Reflection.RuntimeMethodInfo.Invoke (System.Object obj, '
+ 'System.Reflection.BindingFlags invokeAttr, '
+ 'System.Reflection.Binder binder, System.Object[] parameters, '
+ 'System.Globalization.CultureInfo culture) [0x0006a] in '
+ '<b67d2f60bf2548a58dc569b37fe71c3d>:0\nAssertionException: Could '
+ 'not find a tilemap tagged with LevelBounds.\nAssertion failure. '
+ 'Value was Null\nExpected: Value was not Null\n'
+ 'UnityEngine.Assertions.Assert.Fail (System.String message, '
+ 'System.String userMessage) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Asser'
+ 't/AssertBase.cs:29)\nUnityEngine.Assertions.Assert.IsNotNull '
+ '(UnityEngine.Object value, System.String message) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Asser'
+ 't/AssertNull.cs:58)\nUnityEngine.Assertions.Assert.IsNotNull[T] (T '
+ 'value, System.String message) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Asser'
+ 't/AssertNull.cs:46)\n'
+ 'MP.Gameplay.Level.LevelInstaller.InstallBindings () (at '
+ 'Assets/Scripts/Gameplay/Level/LevelInstaller.cs:30)\n'
+ 'Zenject.CompositeMonoInstaller.InstallBindings () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/CompositeMonoInstaller.cs:'
+ '25)\nZenject.Context.InstallInstallers '
+ '(System.Collections.Generic.List`1[T] normalInstallers, '
+ 'System.Collections.Generic.List`1[T] normalInstallerTypes, '
+ 'System.Collections.Generic.List`1[T] scriptableObjectInstallers, '
+ 'System.Collections.Generic.List`1[T] installers, '
+ 'System.Collections.Generic.List`1[T] installerPrefabs) (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:218)\n'
+ 'Zenject.Context.InstallInstallers () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:139)\n'
+ 'Zenject.SceneContext.InstallBindings '
+ '(System.Collections.Generic.List`1[T] injectableMonoBehaviours) '
+ '(at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:3'
+ '46)\nZenject.SceneContext.Install () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:2'
+ '65)\nZenject.SceneContext.Validate () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:1'
+ '21)\nZenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup '
+ '() (at '
+ 'Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:67)\n'
+ 'UnityEngine.Debug:LogException(Exception)\n'
+ 'ModestTree.Log:ErrorException(Exception) (at '
+ 'Assets/ThirdParty/Zenject/Source/Internal/Log.cs:60)\n'
+ 'Zenject.Internal.ZenUnityEditorUtil:ValidateCurrentSceneSetup() '
+ '(at '
+ 'Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:72)\n'
+ 'MP.Tests.AssetValidatorTest:ValidateSceneContainer(String) (at '
+ 'Assets/Tests/EditorMode/AssetValidatorTest.cs:58)\n'
+ 'System.Reflection.MethodBase:Invoke(Object, Object[])\n'
+ 'NUnit.Framework.Internal.Reflect:InvokeMethod(MethodInfo, Object, '
+ 'Object[])\nNUnit.Framework.Internal.MethodWrapper:Invoke(Object, '
+ 'Object[])\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:RunNonAsyncTest'
+ 'Method(ITestExecutionContext)\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:RunTestMethod(I'
+ 'TestExecutionContext)\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:Execute(ITestEx'
+ 'ecutionContext)\n'
+ 'UnityEditor.EditorApplication:Internal_CallUpdateFunctions() (at '
+ '/home/bokken/buildslave/unity/build/Editor/Mono/EditorApplication.c'
+ 's:359)'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There is 1 test, see "Raw output" for the name of the test.',
+ 'title': '1 test found',
+ 'raw_details': 'ValidateSceneContainer("Assets/Scenes/Grid/GridTest.unity")'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/timewarpinc.junit-xml b/python/test/files/nunit/mstest/timewarpinc.junit-xml
new file mode 100644
index 0000000..81486b8
--- /dev/null
+++ b/python/test/files/nunit/mstest/timewarpinc.junit-xml
@@ -0,0 +1,62 @@
+<!-- JUnit XML markup lost in extraction; only embedded failure text survived: -->
+ at Zenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () [0x0009c] in /github/workspace/Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:82
+ at MP.Tests.AssetValidatorTest.ValidateSceneContainer (System.String scenePath) [0x00009] in /github/workspace/Assets/Tests/EditorMode/AssetValidatorTest.cs:58
+ at (wrapper managed-to-native) System.Reflection.RuntimeMethodInfo.InternalInvoke(System.Reflection.RuntimeMethodInfo,object,object[],System.Exception&)
+ at System.Reflection.RuntimeMethodInfo.Invoke (System.Object obj, System.Reflection.BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x0006a] in <b67d2f60bf2548a58dc569b37fe71c3d>:0
+ AssertionException: Could not find a tilemap tagged with LevelBounds.
+Assertion failure. Value was Null
+Expected: Value was not Null
+UnityEngine.Assertions.Assert.Fail (System.String message, System.String userMessage) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertBase.cs:29)
+UnityEngine.Assertions.Assert.IsNotNull (UnityEngine.Object value, System.String message) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertNull.cs:58)
+UnityEngine.Assertions.Assert.IsNotNull[T] (T value, System.String message) (at /home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/AssertNull.cs:46)
+MP.Gameplay.Level.LevelInstaller.InstallBindings () (at Assets/Scripts/Gameplay/Level/LevelInstaller.cs:30)
+Zenject.CompositeMonoInstaller.InstallBindings () (at Assets/ThirdParty/Zenject/Source/Install/CompositeMonoInstaller.cs:25)
+Zenject.Context.InstallInstallers (System.Collections.Generic.List`1[T] normalInstallers, System.Collections.Generic.List`1[T] normalInstallerTypes, System.Collections.Generic.List`1[T] scriptableObjectInstallers, System.Collections.Generic.List`1[T] installers, System.Collections.Generic.List`1[T] installerPrefabs) (at Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:218)
+Zenject.Context.InstallInstallers () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:139)
+Zenject.SceneContext.InstallBindings (System.Collections.Generic.List`1[T] injectableMonoBehaviours) (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:346)
+Zenject.SceneContext.Install () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:265)
+Zenject.SceneContext.Validate () (at Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:121)
+Zenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () (at Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:67)
+UnityEngine.Debug:LogException(Exception)
+ModestTree.Log:ErrorException(Exception) (at Assets/ThirdParty/Zenject/Source/Internal/Log.cs:60)
+Zenject.Internal.ZenUnityEditorUtil:ValidateCurrentSceneSetup() (at Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:72)
+MP.Tests.AssetValidatorTest:ValidateSceneContainer(String) (at Assets/Tests/EditorMode/AssetValidatorTest.cs:58)
+System.Reflection.MethodBase:Invoke(Object, Object[])
+NUnit.Framework.Internal.Reflect:InvokeMethod(MethodInfo, Object, Object[])
+NUnit.Framework.Internal.MethodWrapper:Invoke(Object, Object[])
+NUnit.Framework.Internal.Commands.TestMethodCommand:RunNonAsyncTestMethod(ITestExecutionContext)
+NUnit.Framework.Internal.Commands.TestMethodCommand:RunTestMethod(ITestExecutionContext)
+NUnit.Framework.Internal.Commands.TestMethodCommand:Execute(ITestExecutionContext)
+UnityEditor.EditorApplication:Internal_CallUpdateFunctions() (at /home/bokken/buildslave/unity/build/Editor/Mono/EditorApplication.cs:359)
diff --git a/python/test/files/nunit/mstest/timewarpinc.results b/python/test/files/nunit/mstest/timewarpinc.results
new file mode 100644
index 0000000..36df5f9
--- /dev/null
+++ b/python/test/files/nunit/mstest/timewarpinc.results
@@ -0,0 +1,103 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=1,
+ suite_skipped=0,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=2,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='MP.Tests.AssetValidatorTest.ValidateSceneContainer',
+ tests=1,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='mstest/timewarpinc.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='ValidateSceneContainer("Assets/Scenes/Grid/GridTest.unity")',
+ result='failure',
+ message='Zenject.ZenjectException : Zenject Validation Failed! See errors '
+ 'below for details.',
+ content=' at Zenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup '
+ '() [0x0009c] in '
+ '/github/workspace/Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEdi'
+ 'torUtil.cs:82\n at '
+ 'MP.Tests.AssetValidatorTest.ValidateSceneContainer (System.String '
+ 'scenePath) [0x00009] in '
+ '/github/workspace/Assets/Tests/EditorMode/AssetValidatorTest.cs:58\n '
+ 'at (wrapper managed-to-native) '
+ 'System.Reflection.RuntimeMethodInfo.InternalInvoke(System.Reflection.'
+ 'RuntimeMethodInfo,object,object[],System.Exception&)\n at '
+ 'System.Reflection.RuntimeMethodInfo.Invoke (System.Object obj, '
+ 'System.Reflection.BindingFlags invokeAttr, System.Reflection.Binder '
+ 'binder, System.Object[] parameters, System.Globalization.CultureInfo '
+ 'culture) [0x0006a] in <b67d2f60bf2548a58dc569b37fe71c3d>:0 ',
+ stdout='AssertionException: Could not find a tilemap tagged with '
+ 'LevelBounds.\nAssertion failure. Value was Null\nExpected: Value was '
+ 'not Null\nUnityEngine.Assertions.Assert.Fail (System.String message, '
+ 'System.String userMessage) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/'
+ 'AssertBase.cs:29)\nUnityEngine.Assertions.Assert.IsNotNull '
+ '(UnityEngine.Object value, System.String message) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/'
+ 'AssertNull.cs:58)\nUnityEngine.Assertions.Assert.IsNotNull[T] (T '
+ 'value, System.String message) (at '
+ '/home/bokken/buildslave/unity/build/Runtime/Export/Assertions/Assert/'
+ 'AssertNull.cs:46)\nMP.Gameplay.Level.LevelInstaller.InstallBindings '
+ '() (at Assets/Scripts/Gameplay/Level/LevelInstaller.cs:30)\n'
+ 'Zenject.CompositeMonoInstaller.InstallBindings () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/CompositeMonoInstaller.cs:25'
+ ')\nZenject.Context.InstallInstallers '
+ '(System.Collections.Generic.List`1[T] normalInstallers, '
+ 'System.Collections.Generic.List`1[T] normalInstallerTypes, '
+ 'System.Collections.Generic.List`1[T] scriptableObjectInstallers, '
+ 'System.Collections.Generic.List`1[T] installers, '
+ 'System.Collections.Generic.List`1[T] installerPrefabs) (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:218)\n'
+ 'Zenject.Context.InstallInstallers () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/Context.cs:139)\n'
+ 'Zenject.SceneContext.InstallBindings '
+ '(System.Collections.Generic.List`1[T] injectableMonoBehaviours) (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:346'
+ ')\nZenject.SceneContext.Install () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:265'
+ ')\nZenject.SceneContext.Validate () (at '
+ 'Assets/ThirdParty/Zenject/Source/Install/Contexts/SceneContext.cs:121'
+ ')\nZenject.Internal.ZenUnityEditorUtil.ValidateCurrentSceneSetup () '
+ '(at '
+ 'Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:67)\n'
+ 'UnityEngine.Debug:LogException(Exception)\n'
+ 'ModestTree.Log:ErrorException(Exception) (at '
+ 'Assets/ThirdParty/Zenject/Source/Internal/Log.cs:60)\n'
+ 'Zenject.Internal.ZenUnityEditorUtil:ValidateCurrentSceneSetup() (at '
+ 'Assets/ThirdParty/Zenject/Source/Editor/ZenUnityEditorUtil.cs:72)\n'
+ 'MP.Tests.AssetValidatorTest:ValidateSceneContainer(String) (at '
+ 'Assets/Tests/EditorMode/AssetValidatorTest.cs:58)\n'
+ 'System.Reflection.MethodBase:Invoke(Object, Object[])\n'
+ 'NUnit.Framework.Internal.Reflect:InvokeMethod(MethodInfo, Object, '
+ 'Object[])\nNUnit.Framework.Internal.MethodWrapper:Invoke(Object, '
+ 'Object[])\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:RunNonAsyncTestMe'
+ 'thod(ITestExecutionContext)\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:RunTestMethod(ITe'
+ 'stExecutionContext)\n'
+ 'NUnit.Framework.Internal.Commands.TestMethodCommand:Execute(ITestExec'
+ 'utionContext)\n'
+ 'UnityEditor.EditorApplication:Internal_CallUpdateFunctions() (at '
+ '/home/bokken/buildslave/unity/build/Editor/Mono/EditorApplication.cs:'
+ '359)\n\n',
+ stderr=None,
+ time=2.117365
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/mstest/timewarpinc.xml b/python/test/files/nunit/mstest/timewarpinc.xml
new file mode 100644
index 0000000..ca256b6
--- /dev/null
+++ b/python/test/files/nunit/mstest/timewarpinc.xml
@@ -0,0 +1,86 @@
+<!-- NUnit XML markup lost in extraction; only a stray CDATA fragment survived, no text content recoverable -->
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations
new file mode 100644
index 0000000..c396e30
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.annotations
@@ -0,0 +1,115 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 errors, 1 fail, 8 skipped, 18 pass in 0s',
+ 'summary':
+ '28 tests\u2002\u2003\u200318 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '11 suites\u2003\u2003\u205f\u20048 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20041 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\u2003\u20031 '
+ '[:fire:](https://github.com/step-security/publish-unit-test-result-ac'
+ 'tion/blob/VERSION/README.md#the-symbols "test errors")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIAxFr'
+ '0KYHdTJeBlDUGKjiGlhMt7dIqi49b3fvEMaWCeSvWgqISmATxBpDKg8uI25ZuTFx63tHh'
+ 'goaB2/C7PAzuYTRsGa60lMiA6zwbC9xXj/gkl8vZuL3M1lTTtrwTPkS9Cs5HkBSPFg+uI'
+ 'AAAA=\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-correct.xml\u2003[took 0s]',
+ 'title': 'NUnit.Tests.Assemblies.MockTestFixture.FailingTest failed',
+ 'raw_details':
+ 'Intentional failure\n\n '
+ ' at NUnit.Tests.Assemblies.MockTestFixture.FailingTest () '
+ '[0x00000] in '
+ '/home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/Mock'
+ 'Assembly.cs:121'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'failure',
+ 'message': 'nunit3/jenkins/NUnit-correct.xml\u2003[took 0s]',
+ 'title': 'NUnit.Tests.Assemblies.MockTestFixture.TestWithException with error',
+ 'raw_details':
+ 'System.ApplicationException : Intentional Exception\n\n '
+ ' at '
+ 'NUnit.Tests.Assemblies.MockTestFixture.TestWithException () '
+ '[0x00000] in '
+ '/home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/Mock'
+ 'Assembly.cs:153'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 8 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '8 skipped tests found',
+ 'raw_details':
+ 'NUnit.Tests.Assemblies.MockTestFixture.InconclusiveTest\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest4\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest5\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.NotRunnableTest\n'
+ 'NUnit.Tests.BadFixture.SomeTest\nNUnit.Tests.IgnoredFixture.Test1\n'
+ 'NUnit.Tests.IgnoredFixture.Test2\nNUnit.Tests.IgnoredFixture.Test3'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 28 tests, see "Raw output" for the full list of tests.',
+ 'title': '28 tests found',
+ 'raw_details':
+ 'NUnit.Tests.Assemblies.MockTestFixture.FailingTest\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.InconclusiveTest\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest1\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest2\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest3\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest4\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.MockTest5\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.NotRunnableTest\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.TestWithException\n'
+ 'NUnit.Tests.Assemblies.MockTestFixture.TestWithManyProperties\n'
+ 'NUnit.Tests.BadFixture.SomeTest\n'
+ 'NUnit.Tests.FixtureWithTestCases.GenericMethod(9.2d,11.7d)\n'
+ 'NUnit.Tests.FixtureWithTestCases.GenericMethod(2,4)\n'
+ 'NUnit.Tests.FixtureWithTestCases.MethodWithParameters(2,2)\n'
+ 'NUnit.Tests.FixtureWithTestCases.MethodWithParameters(9,11)\n'
+ 'NUnit.Tests.GenericFixture(11.5d).Test1\n'
+ 'NUnit.Tests.GenericFixture(11.5d).Test2\n'
+ 'NUnit.Tests.GenericFixture(5).Test1\n'
+ 'NUnit.Tests.GenericFixture(5).Test2\n'
+ 'NUnit.Tests.IgnoredFixture.Test1\nNUnit.Tests.IgnoredFixture.Test2\n'
+ 'NUnit.Tests.IgnoredFixture.Test3\n'
+ 'NUnit.Tests.ParameterizedFixture(42).Test1\n'
+ 'NUnit.Tests.ParameterizedFixture(42).Test2\n'
+ 'NUnit.Tests.ParameterizedFixture(5).Test1\n'
+ 'NUnit.Tests.ParameterizedFixture(5).Test2\n'
+ 'NUnit.Tests.Singletons.OneTestCase.TestCase\n'
+ 'NUnit.Tests.TestAssembly.MockTestFixture.MyTest'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.junit-xml
new file mode 100644
index 0000000..e586442
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.junit-xml
@@ -0,0 +1,160 @@
+<!-- JUnit XML markup lost in extraction; only embedded failure text survived: -->
+ at NUnit.Tests.Assemblies.MockTestFixture.FailingTest () [0x00000] in /home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAssembly.cs:121
+ at NUnit.Tests.Assemblies.MockTestFixture.TestWithException () [0x00000] in /home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAssembly.cs:153
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results
new file mode 100644
index 0000000..f19758e
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.results
@@ -0,0 +1,484 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=11,
+ suite_tests=28,
+ suite_skipped=8,
+ suite_failures=1,
+ suite_errors=1,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.Assemblies.MockTestFixture',
+ tests=10,
+ skipped=4,
+ failures=1,
+ errors=1,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.BadFixture',
+ tests=1,
+ skipped=1,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.GenericMethod',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.MethodWithParameters',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.GenericFixture(11.5d)',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.GenericFixture(5)',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.IgnoredFixture',
+ tests=3,
+ skipped=3,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.ParameterizedFixture(42)',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.ParameterizedFixture(5)',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.Singletons.OneTestCase',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='NUnit.Tests.TestAssembly.MockTestFixture',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.FailingTest',
+ result='failure',
+ message='Intentional failure',
+ content='\n at '
+ 'NUnit.Tests.Assemblies.MockTestFixture.FailingTest () [0x00000] in '
+ '/home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAs'
+ 'sembly.cs:121\n\n ',
+ stdout=None,
+ stderr=None,
+ time=0.013
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.InconclusiveTest',
+ result='skipped',
+ message='No valid data',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.MockTest1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.MockTest2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.MockTest3',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.MockTest4',
+ result='skipped',
+ message='ignoring this test method for now',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.MockTest5',
+ result='skipped',
+ message='Method is not public',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.NotRunnableTest',
+ result='skipped',
+ message='No arguments were provided',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.TestWithException',
+ result='error',
+ message='System.ApplicationException : Intentional Exception',
+ content='\n at '
+ 'NUnit.Tests.Assemblies.MockTestFixture.TestWithException () '
+ '[0x00000] in '
+ '/home/charlie/Dev/NUnit/nunit-2.5/work/src/tests/mock-assembly/MockAs'
+ 'sembly.cs:153\n\n ',
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Assemblies.MockTestFixture.TestWithManyProperties',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.BadFixture.SomeTest',
+ result='skipped',
+ message='No suitable constructor was found',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.FixtureWithTestCases.GenericMethod(9.2d,11.7d)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.FixtureWithTestCases.GenericMethod(2,4)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.FixtureWithTestCases.MethodWithParameters(9,11)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.FixtureWithTestCases.MethodWithParameters(2,2)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.GenericFixture(11.5d).Test1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.GenericFixture(11.5d).Test2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.GenericFixture(5).Test1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.GenericFixture(5).Test2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.IgnoredFixture.Test1',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.IgnoredFixture.Test2',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.IgnoredFixture.Test3',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.ParameterizedFixture(42).Test1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.ParameterizedFixture(42).Test2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.ParameterizedFixture(5).Test1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.ParameterizedFixture(5).Test2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.Singletons.OneTestCase.TestCase',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='NUnit.Tests.TestAssembly.MockTestFixture.MyTest',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.xml
new file mode 100644
index 0000000..abd12b0
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct.xml
@@ -0,0 +1,194 @@
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations
new file mode 100644
index 0000000..4369109
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.annotations
@@ -0,0 +1,486 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 183 tests pass in 0s',
+ 'summary':
+ '\u205f\u2004\u205f\u20041 files\u2004\u2003102 suites\u2004\u2003\u2002'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '183 tests\u2003183 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n218 runs\u2006\u2003'
+ '218 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MSw6AIAwFr'
+ '0JYu/CzMV7GEITY+MGUsjLe3YoY0V1nXjq7tDAbLztRFUL6AHRDWTMOARWBW1mUjDxRHN'
+ 'vmod4Hrf9qgi3/6K2C+SMMosNkMKxXs67aBE8yN28xchaMnPe0WxYghnQJPyp5nNtosNP'
+ 'nAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 183 tests, see "Raw output" for the full list of tests.',
+ 'title': '183 tests found',
+ 'raw_details':
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.should'
+ '_return_different_object\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.should'
+ '_return_same_value\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+MutableCompo'
+ 'nents.should_return_empty\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should'
+ '_change_value_in_range(100,-10,1)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should'
+ '_change_value_in_range(100,-100,1)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should'
+ '_change_value_in_range(100,-2,8)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should'
+ '_change_value_in_range(100,2,12)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.should'
+ '_change_value_in_range(1000,1000,321)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AmountTests+RandomizeVal'
+ 'ues.should_randomize_value_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.should_'
+ 'return_different_object\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.should_'
+ 'return_same_value\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+MutableCompon'
+ 'ents.should_return_empty\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_'
+ 'wrap_by_a_value_between_1_and_max_value_in_both_directions(10,355,T'
+ 'rue,5)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_'
+ 'wrap_by_a_value_between_1_and_max_value_in_both_directions(10,40,Fa'
+ 'lse,330)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_'
+ 'wrap_by_a_value_between_1_and_max_value_in_both_directions(10,40,Tr'
+ 'ue,50)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.should_'
+ 'wrap_by_a_value_between_1_and_max_value_in_both_directions(10,6,Fal'
+ 'se,4)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.AngleTests+RandomizeValu'
+ 'e.should_select_a_value_between_0_and_360\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.should_'
+ 'clone_component\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.should_'
+ 'copy_values\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Constructor.s'
+ 'hould_initialize_data\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+ImplicitConve'
+ 'rsionToDrawingColor.should_convert_to_drawing_color\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+MutableCompon'
+ 'ents.should_not_contain_anything\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeAlph'
+ 'a.should_randomize_alpha_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeBlue'
+ '.should_randomize_alpha_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeGree'
+ 'n.should_randomize_alpha_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeRed.'
+ 'should_randomize_alpha_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeValu'
+ 'es.should_randomize_colors_in_the_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.shoul'
+ 'd_return_different_object\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.shoul'
+ 'd_return_same_value\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+MutableComp'
+ 'onents.should_return_empty\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.shoul'
+ 'd_change_value_in_range(100,-10,1)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.shoul'
+ 'd_change_value_in_range(100,-100,1)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.shoul'
+ 'd_change_value_in_range(100,-2,8)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.shoul'
+ 'd_change_value_in_range(100,10,16)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.shoul'
+ 'd_change_value_in_range(100,2,12)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+RandomizeVa'
+ 'lues.should_randomize_value_in_range\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.shou'
+ 'ld_return_different_object\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.shou'
+ 'ld_return_same_value\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+MutableCom'
+ 'ponents.should_return_empty\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.shou'
+ 'ld_change_value_in_range(10,10,1,False,9,9)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.shou'
+ 'ld_change_value_in_range(10,10,1,True,11,11)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.shou'
+ 'ld_change_value_in_range(10,10,100,False,0,0)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.shou'
+ 'ld_change_value_in_range(10,10,1000,True,321,654)\n'
+ 'imG.Approx.Tests.Components.BuildingBlocks.PositionTests+RandomizeV'
+ 'alues.should_return_value_inside_target_limits\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Clone.should_clone_all_shap'
+ 'es\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Clone.should_clone_inner_co'
+ 'mponents\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Clone.should_copy_propertie'
+ 's\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Clone.should_create_clone_o'
+ 'f_target\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Constructor.should_keep_dat'
+ 'a\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Draw.should_draw_all_shapes'
+ '\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Draw.should_fill_image_with'
+ '_background_color\n'
+ 'imG.Approx.Tests.Components.DrawingTest+Draw.should_return_correct_'
+ 'size_image\n'
+ 'imG.Approx.Tests.Components.DrawingTest+MutableComponents.should_co'
+ 'ntain_all_shapes\n'
+ 'imG.Approx.Tests.Components.DrawingTest+MutableComponents.should_co'
+ 'ntain_color\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+Clone.should_return_di'
+ 'fferent_object(System.Func`2[imG.Approx.Components.Shapes.Area,Syst'
+ 'em.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+InitializeComponents.s'
+ 'hould_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.C'
+ 'omponents.BuildingBlocks.Angle)\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.C'
+ 'omponents.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Area,imG.Approx.C'
+ 'omponents.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+Clone.should_return_'
+ 'different_object(System.Func`2[imG.Approx.Components.Shapes.Bezier,'
+ 'System.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+InitializeComponents'
+ '.should_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Bezier,imG.Appr'
+ 'ox.Components.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Bezier,imG.Appr'
+ 'ox.Components.BuildingBlocks.PenSize)\n'
+ 'imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Bezier,imG.Appr'
+ 'ox.Components.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.BlobTests+Clone.should_return_di'
+ 'fferent_object(System.Func`2[imG.Approx.Components.Shapes.Blob,Syst'
+ 'em.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.BlobTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.BlobTests+InitializeComponents.s'
+ 'hould_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Blob,imG.Approx.C'
+ 'omponents.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Blob,imG.Approx.C'
+ 'omponents.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+Clone.should_return_'
+ 'different_object(System.Func`2[imG.Approx.Components.Shapes.Circle,'
+ 'System.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+InitializeComponents'
+ '.should_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Circle,imG.Appr'
+ 'ox.Components.BuildingBlocks.Amount)\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Circle,imG.Appr'
+ 'ox.Components.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.sh'
+ 'ould_return_components(imG.Approx.Components.Shapes.Circle,imG.Appr'
+ 'ox.Components.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.ShapeF'
+ 'actoryTests+GetShape.should_return_shape\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.ShapeF'
+ 'actoryTests+Name.should_return_name_by_default\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+ActiveFactories.should_return_only_active_factories\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+Disable.should_enable_factories_named\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+DisableAll.should_disable_all_factories\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+Enable.should_enable_factories_named\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+EnableAll.should_enable_all_factories\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+Register.should_add_factory\n'
+ 'imG.Approx.Tests.Components.Shapes.Factories.ShapeFactoryCatalogTes'
+ 'ts+RegisterAllFactories.should_register_all_factories\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+Clone.should_return_di'
+ 'fferent_object(System.Func`2[imG.Approx.Components.Shapes.Line,Syst'
+ 'em.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+InitializeComponents.s'
+ 'hould_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Line,imG.Approx.C'
+ 'omponents.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Line,imG.Approx.C'
+ 'omponents.BuildingBlocks.PenSize)\n'
+ 'imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.shou'
+ 'ld_return_components(imG.Approx.Components.Shapes.Line,imG.Approx.C'
+ 'omponents.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.PolygonTests+Clone.should_return'
+ '_different_object(System.Func`2[imG.Approx.Components.Shapes.Polygo'
+ 'n,System.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.PolygonTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.PolygonTests+InitializeComponent'
+ 's.should_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.s'
+ 'hould_return_components(imG.Approx.Components.Shapes.Polygon,imG.Ap'
+ 'prox.Components.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.s'
+ 'hould_return_components(imG.Approx.Components.Shapes.Polygon,imG.Ap'
+ 'prox.Components.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+Clone.should_retu'
+ 'rn_different_object(System.Func`2[imG.Approx.Components.Shapes.Rect'
+ 'angle,System.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+InitializeCompone'
+ 'nts.should_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents'
+ '.should_return_components(imG.Approx.Components.Shapes.Rectangle,im'
+ 'G.Approx.Components.BuildingBlocks.Amount)\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents'
+ '.should_return_components(imG.Approx.Components.Shapes.Rectangle,im'
+ 'G.Approx.Components.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents'
+ '.should_return_components(imG.Approx.Components.Shapes.Rectangle,im'
+ 'G.Approx.Components.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Components.Shapes.TriangleTests+Clone.should_retur'
+ 'n_different_object(System.Func`2[imG.Approx.Components.Shapes.Trian'
+ 'gle,System.Object])\n'
+ 'imG.Approx.Tests.Components.Shapes.TriangleTests+Draw.should_draw\n'
+ 'imG.Approx.Tests.Components.Shapes.TriangleTests+InitializeComponen'
+ 'ts.should_randomize_elements\n'
+ 'imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Color)\n'
+ 'imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '0,0)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '0,1)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '6,0)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '6,1)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '7,0)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_mutation_description_determined_by_random_provider(System.Collecti'
+ 'ons.Generic.Dictionary`2[imG.Approx.Mutation.IMutationDescription,S'
+ 'ystem.Collections.Generic.List`1[imG.Approx.Mutation.IMutable]],Cas'
+ 'tle.Proxies.IMutationDescriptionProxy,Castle.Proxies.IMutableProxy,'
+ '7,1)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.should_return'
+ '_null_if_no_mutation_exists\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_retur'
+ 'n_active_and_applicable_and_selectable_mutations\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_retur'
+ 'n_empty_if_mutable_is_unknown\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.should_retur'
+ 'n_mutations_recursively\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_alway'
+ 's_have_occasions_to_mutate\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_alway'
+ 's_mutate_without_doing_anything_to_the_target\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_alway'
+ 's_target_IMutableType\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_be_al'
+ 'ways_able_to_mutate\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.should_be_al'
+ 'ways_active\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return'
+ '_a_mutation\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return'
+ '_matching_selected_mutation\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_return'
+ '_the_default_mutation_if_no_mutation_exists\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_throw_'
+ 'if_any_component_is_null(imG.Approx.Mutation.Process,null)\n'
+ 'imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.should_throw_'
+ 'if_any_component_is_null(null,Castle.Proxies.IMutableProxy)\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+DeclareMut'
+ 'ation.should_add_description_to_catalog\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+DeclareMut'
+ 'ation.should_throw_when_the_same_description_is_declared_twice\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should'
+ '_return_empty_list_for_unknown_mutable_type\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should'
+ '_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.Mut'
+ 'ableAndDescription.Mutable1,imG.Approx.Mutation.MutationDescription'
+ '`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable1],imG.Ap'
+ 'prox.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.Mutab'
+ 'leAndDescription.Mutable2])\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should'
+ '_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.Mut'
+ 'ableAndDescription.Mutable2,imG.Approx.Mutation.MutationDescription'
+ '`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable2],imG.Ap'
+ 'prox.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.Mutab'
+ 'leAndDescription.Mutable3])\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.should'
+ '_return_list_of_descriptions_for_type(imG.Approx.Tests.Mutation.Mut'
+ 'ableAndDescription.Mutable3,imG.Approx.Mutation.MutationDescription'
+ '`1[imG.Approx.Tests.Mutation.MutableAndDescription.Mutable3],imG.Ap'
+ 'prox.Mutation.MutationDescription`1[imG.Approx.Tests.Mutation.Mutab'
+ 'leAndDescription.Mutable1])\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+RegisterAl'
+ 'lMutations.should_register_all_mutations_declared_by_registrars\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+CanMutate.lambda'
+ '_is_called_when_checking\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.shou'
+ 'ld_refuse_odds_that_are_not_positive(-1)\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.shou'
+ 'ld_refuse_odds_that_are_not_positive(-1000)\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.shou'
+ 'ld_refuse_odds_that_are_not_positive(0)\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+GetMutationTarge'
+ 'tType.should_return_type_of_generic\n'
+ 'imG.Approx.Tests.Mutation.MutationDescriptionTests+Mutate.lambda_is'
+ '_called_when_mutating\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_'
+ 'any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,Cas'
+ 'tle.Proxies.IMutationDescriptionCatalogProxy,Castle.Proxies.ITarget'
+ 'Proxy,null)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_'
+ 'any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,Cas'
+ 'tle.Proxies.IMutationDescriptionCatalogProxy,null,Castle.Proxies.IS'
+ 'hapeFactoryCatalogProxy)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_'
+ 'any_argument_is_null(Castle.Proxies.IRandomizationProviderProxy,nul'
+ 'l,Castle.Proxies.ITargetProxy,Castle.Proxies.IShapeFactoryCatalogPr'
+ 'oxy)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Constructor.should_throw_if_'
+ 'any_argument_is_null(null,Castle.Proxies.IMutationDescriptionCatalo'
+ 'gProxy,Castle.Proxies.ITargetProxy,Castle.Proxies.IShapeFactoryCata'
+ 'logProxy)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_always_keep_be'
+ 'st_drawing_according_to_distance(False)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_always_keep_be'
+ 'st_drawing_according_to_distance(True)\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_increase_evolu'
+ 'tions_when_drawing_is_better\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_increase_gener'
+ 'ation_number\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_trigger_event_'
+ 'when_drawing_is_better\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+Mutate.should_trigger_event_'
+ 'when_drawing_is_worse\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_compute_'
+ 'the_distance_only_the_first_time\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_create_d'
+ 'rawing_based_on_target\n'
+ 'imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.should_create_t'
+ 'he_drawing_only_the_first_time\n'
+ 'imG.Approx.Tests.Mutation.RandomizationProviderTests+Constructor.sh'
+ 'ould_keep_the_seed\n'
+ 'imG.Approx.Tests.Mutation.RandomizationProviderTests+Next.should_re'
+ 'turn_integer\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+Constructor.should_keep_initi'
+ 'alized_data(System.Func`2[imG.Approx.Mutation.Target,System.Object]'
+ ',"data\\\\red.png")\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+Constructor.should_keep_initi'
+ 'alized_data(System.Func`2[imG.Approx.Mutation.Target,System.Object]'
+ ',25)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+DistanceTo.should_not_throw_i'
+ 'f_dimensions_are_identical\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+DistanceTo.should_throw_if_di'
+ 'mensions_are_different(imG.Approx.Components.Drawing)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_load_dim'
+ 'ensions_from_image\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_load_ima'
+ 'ge_data\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_not_resi'
+ 'ze_if_image_dimensions_are_over_or_equal_to_maxDimension(100)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_not_resi'
+ 'ze_if_image_dimensions_are_over_or_equal_to_maxDimension(50)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_resize_i'
+ 'f_image_dimensions_are_over_maxDimension\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_rati'
+ 'o_to_correct_value_when_loading(10,0.2f)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_rati'
+ 'o_to_correct_value_when_loading(25,0.5f)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_rati'
+ 'o_to_correct_value_when_loading(50,1.0f)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+LoadImageData.should_set_rati'
+ 'o_to_correct_value_when_loading(99,1.0f)\n'
+ 'imG.Approx.Tests.Mutation.TargetTests+Name.should_return_filename\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_bet'
+ 'ween_original_and_min_value(1,0,1)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_bet'
+ 'ween_original_and_min_value(1,1,1)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_max_value_bet'
+ 'ween_original_and_min_value(1,10,10)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_min_value_bet'
+ 'ween_original_and_max_value(1,0,0)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_min_value_bet'
+ 'ween_original_and_max_value(1,1,1)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_return_min_value_bet'
+ 'ween_original_and_max_value(1,10,1)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Clamp.should_throw_if_min_is_abov'
+ 'e_max\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_throw_if_min_is_above'
+ '_max\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(-1'
+ '01,10,20,19)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(10'
+ ',10,25,10)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(10'
+ '1,10,20,11)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(16'
+ ',10,25,16)\n'
+ 'imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(25'
+ ',10,25,10)'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.junit-xml
new file mode 100644
index 0000000..031729c
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.junit-xml
@@ -0,0 +1,767 @@
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results
new file mode 100644
index 0000000..1c398ea
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.results
@@ -0,0 +1,4131 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=102,
+ suite_tests=218,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+'
+ 'MutableComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.'
+ 'should_change_value_in_range',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+'
+ 'RandomizeValues',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+'
+ 'MutableComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.'
+ 'should_wrap_by_a_value_between_1_and_max_value_in_both_directions',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+RandomizeValue',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Constructor',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+'
+ 'ImplicitConversionToDrawingColor',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+'
+ 'MutableComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeAlpha',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeBlue',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeGreen',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeRed',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeValues',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+'
+ 'MutableComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.'
+ 'should_change_value_in_range',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+'
+ 'RandomizeValues',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+'
+ 'MutableComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.'
+ 'should_change_value_in_range',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+'
+ 'RandomizeValues',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.DrawingTest+Clone',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.DrawingTest+Constructor',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.DrawingTest+Draw',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.DrawingTest+MutableComponents',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.AreaTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.AreaTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.BezierTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.BezierTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.BlobTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.BlobTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.CircleTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.CircleTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.'
+ 'ShapeFactoryTests+GetShape',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.'
+ 'ShapeFactoryTests+Name',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+ActiveFactories',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Disable',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+DisableAll',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Enable',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+EnableAll',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Register',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+RegisterAllFactories',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.LineTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.LineTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.PolygonTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.PolygonTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.RectangleTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.RectangleTests+'
+ 'InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_different_object',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.TriangleTests+Draw',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.TriangleTests+InitializeComponents',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Components.Shapes.should_return_components',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.'
+ 'should_return_mutation_description_determined_by_random_provider',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation',
+ tests=7,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_throw_if_any_component_is_null',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+'
+ 'DeclareMutation',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_return_list_of_descriptions_for_type',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+'
+ 'RegisterAllMutations',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionTests+CanMutate',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_refuse_odds_that_are_not_positive',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionTests+'
+ 'GetMutationTargetType',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.MutationDescriptionTests+Mutate',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_throw_if_any_argument_is_null',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.'
+ 'should_always_keep_best_drawing_according_to_distance',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.ProcessTests+Mutate',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.RandomizationProviderTests+Constructor',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.RandomizationProviderTests+Next',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_keep_initialized_data',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.should_throw_if_dimensions_are_different',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.TargetTests+DistanceTo',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.'
+ 'should_not_resize_if_image_dimensions_are_over_or_equal_to_maxDimensi'
+ 'on',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.'
+ 'should_set_ratio_to_correct_value_when_loading',
+ tests=4,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData',
+ tests=9,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Mutation.TargetTests+Name',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Tools.'
+ 'should_return_max_value_between_original_and_min_value',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Tools.'
+ 'should_return_min_value_between_original_and_max_value',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Tools.TestValues+Clamp',
+ tests=7,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Tools.should_wrap_back_to_range',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='imG.Approx.Tests.Tools.TestValues+Wrap',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.'
+ 'should_return_different_object',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.012
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Clone.'
+ 'should_return_same_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+'
+ 'MutableComponents.should_return_empty',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.'
+ 'should_change_value_in_range(100,-2,8)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.'
+ 'should_change_value_in_range(100,-100,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.'
+ 'should_change_value_in_range(100,-10,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.'
+ 'should_change_value_in_range(1000,1000,321)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+Nudge.'
+ 'should_change_value_in_range(100,2,12)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AmountTests+'
+ 'RandomizeValues.should_randomize_value_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.'
+ 'should_return_different_object',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Clone.'
+ 'should_return_same_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+'
+ 'MutableComponents.should_return_empty',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.'
+ 'should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,'
+ '40,False,330)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.'
+ 'should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,'
+ '355,True,5)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.'
+ 'should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,'
+ '6,False,4)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+Nudge.'
+ 'should_wrap_by_a_value_between_1_and_max_value_in_both_directions(10,'
+ '40,True,50)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.AngleTests+RandomizeValue.'
+ 'should_select_a_value_between_0_and_360',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.'
+ 'should_clone_component',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Clone.'
+ 'should_copy_values',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+Constructor.'
+ 'should_initialize_data',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+'
+ 'ImplicitConversionToDrawingColor.should_convert_to_drawing_color',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+'
+ 'MutableComponents.should_not_contain_anything',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeAlpha.'
+ 'should_randomize_alpha_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeBlue.'
+ 'should_randomize_alpha_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeGreen.'
+ 'should_randomize_alpha_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+RandomizeRed.'
+ 'should_randomize_alpha_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.ColorTests+'
+ 'RandomizeValues.should_randomize_colors_in_the_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.'
+ 'should_return_different_object',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Clone.'
+ 'should_return_same_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+'
+ 'MutableComponents.should_return_empty',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.'
+ 'should_change_value_in_range(100,-100,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.'
+ 'should_change_value_in_range(100,10,16)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.'
+ 'should_change_value_in_range(100,2,12)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.'
+ 'should_change_value_in_range(100,-2,8)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+Nudge.'
+ 'should_change_value_in_range(100,-10,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PenSizeTests+'
+ 'RandomizeValues.should_randomize_value_in_range',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.'
+ 'should_return_different_object',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Clone.'
+ 'should_return_same_value',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+'
+ 'MutableComponents.should_return_empty',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.'
+ 'should_change_value_in_range(10,10,1,True,11,11)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.'
+ 'should_change_value_in_range(10,10,100,False,0,0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.'
+ 'should_change_value_in_range(10,10,1,False,9,9)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+Nudge.'
+ 'should_change_value_in_range(10,10,1000,True,321,654)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.BuildingBlocks.PositionTests+'
+ 'RandomizeValues.should_return_value_inside_target_limits',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Clone.should_clone_all_shapes',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Clone.'
+ 'should_clone_inner_components',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Clone.should_copy_properties',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Clone.'
+ 'should_create_clone_of_target',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Constructor.should_keep_data',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Draw.should_draw_all_shapes',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Draw.'
+ 'should_fill_image_with_background_color',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+Draw.'
+ 'should_return_correct_size_image',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+MutableComponents.'
+ 'should_contain_all_shapes',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.DrawingTest+MutableComponents.'
+ 'should_contain_color',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Area,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Area,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Area,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Area,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Area,imG.'
+ 'Approx.Components.BuildingBlocks.Angle)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.AreaTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Area,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Bezier,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.PenSize)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BezierTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Bezier,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Blob,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Blob,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.006
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Blob,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Blob,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Blob,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Blob,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.BlobTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Blob,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Circle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Circle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Circle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Circle,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Circle,imG.'
+ 'Approx.Components.BuildingBlocks.Amount)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.CircleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Circle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.'
+ 'ShapeFactoryTests+GetShape.should_return_shape',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.ConcreteFactory.'
+ 'ShapeFactoryTests+Name.should_return_name_by_default',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+ActiveFactories.'
+ 'should_return_only_active_factories',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.007
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Disable.should_enable_factories_named',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.011
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+DisableAll.should_disable_all_factories',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Enable.should_enable_factories_named',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+EnableAll.should_enable_all_factories',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+Register.should_add_factory',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.Factories.'
+ 'ShapeFactoryCatalogTests+RegisterAllFactories.'
+ 'should_register_all_factories',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.504
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Line,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Line,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Line,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Line,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Line,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Line,imG.'
+ 'Approx.Components.BuildingBlocks.PenSize)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Line,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.LineTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Line,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Polygon,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Polygon,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+InitializeComponents.'
+ 'should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Polygon,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Polygon,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Polygon,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Polygon,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.PolygonTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Polygon,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Rectangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Rectangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Rectangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Rectangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+'
+ 'InitializeComponents.should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.'
+ 'Approx.Components.BuildingBlocks.Amount)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.'
+ 'Approx.Components.BuildingBlocks.Amount)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.RectangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Rectangle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Triangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Triangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Triangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+Clone.'
+ 'should_return_different_object(System.Func`2[imG.Approx.Components.'
+ 'Shapes.Triangle,System.Object])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+Draw.should_draw',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+'
+ 'InitializeComponents.should_randomize_elements',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Color)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Components.Shapes.TriangleTests+MutableComponents.'
+ 'should_return_components(imG.Approx.Components.Shapes.Triangle,imG.'
+ 'Approx.Components.BuildingBlocks.Position)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,0,0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,6,0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,0,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,6,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,7,0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_mutation_description_determined_by_random_provider('
+ 'System.Collections.Generic.Dictionary`2[imG.Approx.Mutation.'
+ 'IMutationDescription,System.Collections.Generic.List`1[imG.Approx.'
+ 'Mutation.IMutable]],Castle.Proxies.IMutationDescriptionProxy,Castle.'
+ 'Proxies.IMutableProxy,7,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+ChooseMutation.'
+ 'should_return_null_if_no_mutation_exists',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.'
+ 'should_return_active_and_applicable_and_selectable_mutations',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.'
+ 'should_return_empty_if_mutable_is_unknown',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+GetMutationsFor.'
+ 'should_return_mutations_recursively',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.'
+ 'should_always_have_occasions_to_mutate',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.'
+ 'should_always_mutate_without_doing_anything_to_the_target',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.'
+ 'should_always_target_IMutableType',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.'
+ 'should_be_always_able_to_mutate',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+NoOpDescription.'
+ 'should_be_always_active',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.'
+ 'should_throw_if_any_component_is_null(null,Castle.Proxies.'
+ 'IMutableProxy)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.'
+ 'should_throw_if_any_component_is_null(imG.Approx.Mutation.Process,'
+ 'null)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.'
+ 'should_return_a_mutation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.'
+ 'should_return_matching_selected_mutation',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutagenTests+SelectMutation.'
+ 'should_return_the_default_mutation_if_no_mutation_exists',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+'
+ 'DeclareMutation.should_add_description_to_catalog',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+'
+ 'DeclareMutation.'
+ 'should_throw_when_the_same_description_is_declared_twice',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.'
+ 'should_return_list_of_descriptions_for_type(imG.Approx.Tests.'
+ 'Mutation.MutableAndDescription.Mutable1,imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable1],imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable2])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.'
+ 'should_return_list_of_descriptions_for_type(imG.Approx.Tests.'
+ 'Mutation.MutableAndDescription.Mutable2,imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable2],imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable3])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.'
+ 'should_return_list_of_descriptions_for_type(imG.Approx.Tests.'
+ 'Mutation.MutableAndDescription.Mutable3,imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable3],imG.Approx.Mutation.'
+ 'MutationDescription`1[imG.Approx.Tests.Mutation.'
+ 'MutableAndDescription.Mutable1])',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+For.'
+ 'should_return_empty_list_for_unknown_mutable_type',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionCatalogTest+'
+ 'RegisterAllMutations.'
+ 'should_register_all_mutations_declared_by_registrars',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.204
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+CanMutate.'
+ 'lambda_is_called_when_checking',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.004
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.'
+ 'should_refuse_odds_that_are_not_positive(-1000)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.'
+ 'should_refuse_odds_that_are_not_positive(0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+Constructor.'
+ 'should_refuse_odds_that_are_not_positive(-1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+'
+ 'GetMutationTargetType.should_return_type_of_generic',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.MutationDescriptionTests+Mutate.'
+ 'lambda_is_called_when_mutating',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Constructor.'
+ 'should_throw_if_any_argument_is_null(null,Castle.Proxies.'
+ 'IMutationDescriptionCatalogProxy,Castle.Proxies.ITargetProxy,Castle.'
+ 'Proxies.IShapeFactoryCatalogProxy)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Constructor.'
+ 'should_throw_if_any_argument_is_null(Castle.Proxies.'
+ 'IRandomizationProviderProxy,null,Castle.Proxies.ITargetProxy,Castle.'
+ 'Proxies.IShapeFactoryCatalogProxy)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Constructor.'
+ 'should_throw_if_any_argument_is_null(Castle.Proxies.'
+ 'IRandomizationProviderProxy,Castle.Proxies.'
+ 'IMutationDescriptionCatalogProxy,null,Castle.Proxies.'
+ 'IShapeFactoryCatalogProxy)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Constructor.'
+ 'should_throw_if_any_argument_is_null(Castle.Proxies.'
+ 'IRandomizationProviderProxy,Castle.Proxies.'
+ 'IMutationDescriptionCatalogProxy,Castle.Proxies.ITargetProxy,null)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_always_keep_best_drawing_according_to_distance(False)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.005
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_always_keep_best_drawing_according_to_distance(True)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_increase_evolutions_when_drawing_is_better',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_increase_generation_number',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_trigger_event_when_drawing_is_better',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+Mutate.'
+ 'should_trigger_event_when_drawing_is_worse',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.'
+ 'should_compute_the_distance_only_the_first_time',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.'
+ 'should_create_drawing_based_on_target',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.ProcessTests+SetupDrawing.'
+ 'should_create_the_drawing_only_the_first_time',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.RandomizationProviderTests+Constructor.'
+ 'should_keep_the_seed',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.RandomizationProviderTests+Next.'
+ 'should_return_integer',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+Constructor.'
+ 'should_keep_initialized_data(System.Func`2[imG.Approx.Mutation.'
+ 'Target,System.Object],25)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.009
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+Constructor.'
+ 'should_keep_initialized_data(System.Func`2[imG.Approx.Mutation.'
+ 'Target,System.Object],"data\\\\red.png")',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+DistanceTo.'
+ 'should_throw_if_dimensions_are_different(imG.Approx.Components.'
+ 'Drawing)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+DistanceTo.'
+ 'should_throw_if_dimensions_are_different(imG.Approx.Components.'
+ 'Drawing)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+DistanceTo.'
+ 'should_throw_if_dimensions_are_different(imG.Approx.Components.'
+ 'Drawing)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+DistanceTo.'
+ 'should_not_throw_if_dimensions_are_identical',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_not_resize_if_image_dimensions_are_over_or_equal_to_maxDimensi'
+ 'on(100)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.003
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_not_resize_if_image_dimensions_are_over_or_equal_to_maxDimensi'
+ 'on(50)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_set_ratio_to_correct_value_when_loading(10,0.2f)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.002
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_set_ratio_to_correct_value_when_loading(25,0.5f)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_set_ratio_to_correct_value_when_loading(99,1.0f)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_set_ratio_to_correct_value_when_loading(50,1.0f)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_load_dimensions_from_image',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_load_image_data',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+LoadImageData.'
+ 'should_resize_if_image_dimensions_are_over_maxDimension',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Mutation.TargetTests+Name.should_return_filename',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_max_value_between_original_and_min_value(1,1,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_max_value_between_original_and_min_value(1,0,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_max_value_between_original_and_min_value(1,10,10)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_min_value_between_original_and_max_value(1,10,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_min_value_between_original_and_max_value(1,1,1)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_return_min_value_between_original_and_max_value(1,0,0)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Clamp.'
+ 'should_throw_if_min_is_above_max',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(-'
+ '101,10,20,19)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(16,'
+ '10,25,16)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(10,'
+ '10,25,10)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(25,'
+ '10,25,10)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.should_wrap_back_to_range(101,'
+ '10,20,11)',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct2.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='imG.Approx.Tests.Tools.TestValues+Wrap.'
+ 'should_throw_if_min_is_above_max',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.xml
new file mode 100644
index 0000000..7e70f8b
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct2.xml
@@ -0,0 +1,768 @@
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations
new file mode 100644
index 0000000..7984814
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.annotations
@@ -0,0 +1,67 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 22 tests pass in 4m 24s',
+ 'summary':
+ '22 tests\u2002\u2003\u200322 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '4m 24s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '13 suites\u2003\u2003\u205f\u20040 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u2003\u205f\u20040 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/12MOw6AIBAFr'
+ '0KoLRSNhZcxBDVuFDELVMa7uwh+uzfzktn4AHNvecOKjHHrwUUoiTqP0oFZiEVdkaDPhV'
+ 'eIC1rrlfqZCVYy+S0GCfNH9IgGk0G/3MWwP8Eont7Jr9zJ75oyWoMjSIvZUfL9APCIHb/'
+ 'kAAAA\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 22 tests, see "Raw output" for the full list of tests.',
+ 'title': '22 tests found',
+ 'raw_details':
+ 'BlogEngine.Tests.Account.Login.InvalidLoginShouldFail\n'
+ 'BlogEngine.Tests.Account.Login.ValidLoginShouldPass\n'
+ 'BlogEngine.Tests.Comments.Comment.CanAddUpdateAndDeleteComment\n'
+ 'BlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppCodeDirectory\n'
+ 'BlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppDataDirectory\n'
+ 'BlogEngine.Tests.Navigation.CustomPages.CanNavigateToCustomAspxPage'
+ '\n'
+ 'BlogEngine.Tests.Navigation.CustomPages.CanNavigateToDefaultAspxPag'
+ 'eInSubDiretory\nBlogEngine.Tests.Navigation.SubBlog.MyTest\n'
+ 'BlogEngine.Tests.Navigation.SubBlogAggregation.MyTest\n'
+ 'BlogEngine.Tests.Packaging.Installer.CanInstallAndUninstallTheme\n'
+ 'BlogEngine.Tests.Posts.Post.CanCreateAndDeletePost\n'
+ 'BlogEngine.Tests.QuickNotes.Crud.ShouldBeAbleToCreateUpdateAndDelet'
+ 'eNote\n'
+ 'BlogEngine.Tests.QuickNotes.Navigation.AdminShouldSeeQuickNotesPane'
+ 'l\n'
+ 'BlogEngine.Tests.QuickNotes.Navigation.AnonymousUserShouldNotSeeQui'
+ 'ckNotesPanel\n'
+ 'BlogEngine.Tests.QuickNotes.Navigation.ShouldBeAbleBrowseThroughTab'
+ 's\nBlogEngine.Tests.QuickNotes.Posting.PublishQuickNoteAsPost\n'
+ 'BlogEngine.Tests.Quixote.Runner.RunAvatarTests\n'
+ 'BlogEngine.Tests.Quixote.Runner.RunPackagingTests\n'
+ 'BlogEngine.Tests.Quixote.Runner.RunPagerTests\n'
+ 'BlogEngine.Tests.Quixote.Runner.RunUrlRewriteNoExtensionsTests\n'
+ 'BlogEngine.Tests.Quixote.Runner.RunUrlRewriteTests\n'
+ 'BlogEngine.Tests.Users.AuthorProfile.CanAddUpdateAndDeleteUserProfi'
+ 'le'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.junit-xml
new file mode 100644
index 0000000..a069bcd
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.junit-xml
@@ -0,0 +1,170 @@
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results
new file mode 100644
index 0000000..6a9c1f3
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.results
@@ -0,0 +1,420 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=13,
+ suite_tests=22,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=264,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Account.Login',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Comments.Comment',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.FileSystem.Crud',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Navigation.CustomPages',
+ tests=2,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Navigation.SubBlog',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Navigation.SubBlogAggregation',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Packaging.Installer',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Posts.Post',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.QuickNotes.Crud',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.QuickNotes.Navigation',
+ tests=3,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.QuickNotes.Posting',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Quixote.Runner',
+ tests=5,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='BlogEngine.Tests.Users.AuthorProfile',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Account.Login.InvalidLoginShouldFail',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.219
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Account.Login.ValidLoginShouldPass',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.047
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Comments.Comment.CanAddUpdateAndDeleteComment',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=16.392
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppCodeDirectory',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=60.395
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.FileSystem.Crud.CanWriteAndReadAppDataDirectory',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.016
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Navigation.CustomPages.CanNavigateToCustomAspxPage',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.578
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Navigation.CustomPages.'
+ 'CanNavigateToDefaultAspxPageInSubDiretory',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.375
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Navigation.SubBlog.MyTest',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Navigation.SubBlogAggregation.MyTest',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Packaging.Installer.CanInstallAndUninstallTheme',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=51.051
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Posts.Post.CanCreateAndDeletePost',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=33.346
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.QuickNotes.Crud.'
+ 'ShouldBeAbleToCreateUpdateAndDeleteNote',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=6.203
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.QuickNotes.Navigation.AdminShouldSeeQuickNotesPanel',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.719
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.QuickNotes.Navigation.'
+ 'AnonymousUserShouldNotSeeQuickNotesPanel',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.063
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.QuickNotes.Navigation.ShouldBeAbleBrowseThroughTabs',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.032
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.QuickNotes.Posting.PublishQuickNoteAsPost',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=10.219
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Quixote.Runner.RunAvatarTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=1.813
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Quixote.Runner.RunPackagingTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=16.204
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Quixote.Runner.RunPagerTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=23.095
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Quixote.Runner.RunUrlRewriteNoExtensionsTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=3.188
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Quixote.Runner.RunUrlRewriteTests',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=8.0
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-correct3.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='BlogEngine.Tests.Users.AuthorProfile.CanAddUpdateAndDeleteUserProfile',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=22.049
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.xml
new file mode 100644
index 0000000..8208041
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-correct3.xml
@@ -0,0 +1,171 @@
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations
new file mode 100644
index 0000000..ab57926
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.annotations
@@ -0,0 +1,60 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail, 2 pass in 0s',
+ 'summary':
+ '3 tests\u2002\u2003\u20032 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MOw6AIBBEr'
+ '0KoLfx0XoYQ1LhRwCxQGe/uykfo5s1M3s03OFfHZzZ0jLsA/ocloPRgDWFPSIP/pqlk4Y'
+ 'JSVIy1OOBq32KTcGZbKlZEi/mCwRTfF1td4mqL3Mgity5ltQZPkBNzu+TPC/n9SCLdAAA'
+ 'A\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-failure.xml\u2003[took 0s]',
+ 'title': 'UnitTests.MainClassTest.TestFailure failed',
+ 'raw_details':
+ ' Expected failure\n Expected: 30\n But was: 20\n at '
+ 'UnitTests.MainClassTest.TestFailure () [0x00000] \n at <0x00000> '
+ '<unknown method>\n at (wrapper managed-to-native) '
+ 'System.Reflection.MonoMethod:InternalInvoke (object,object[])\n '
+ 'at System.Reflection.MonoMethod.Invoke (System.Object obj, '
+ 'BindingFlags invokeAttr, System.Reflection.Binder binder, '
+ 'System.Object[] parameters, System.Globalization.CultureInfo '
+ 'culture) [0x00000]'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 3 tests, see "Raw output" for the full list of tests.',
+ 'title': '3 tests found',
+ 'raw_details':
+ 'UnitTests.MainClassTest.TestFailure\n'
+ 'UnitTests.MainClassTest.TestMethodUpdateValue\n'
+ 'UnitTests.MainClassTest.TestPropertyValue'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.junit-xml
new file mode 100644
index 0000000..3edf7e7
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.junit-xml
@@ -0,0 +1,26 @@
+ at UnitTests.MainClassTest.TestFailure () [0x00000]
+ at <0x00000> <unknown method>
+ at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])
+ at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results
new file mode 100644
index 0000000..b9a0357
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.results
@@ -0,0 +1,67 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=3,
+ suite_skipped=0,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='UnitTests.MainClassTest',
+ tests=3,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-failure.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-failure.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestMethodUpdateValue',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-failure.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestFailure',
+ result='failure',
+ message=' Expected failure\n Expected: 30\n But was: 20\n',
+ content=' at UnitTests.MainClassTest.TestFailure () [0x00000] \n at '
+ '<0x00000> <unknown method>\n at (wrapper managed-to-native) '
+ 'System.Reflection.MonoMethod:InternalInvoke (object,object[])\n at '
+ 'System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags '
+ 'invokeAttr, System.Reflection.Binder binder, System.Object[] '
+ 'parameters, System.Globalization.CultureInfo culture) [0x00000] \n',
+ stdout=None,
+ stderr=None,
+ time=0.092
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-failure.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.xml
new file mode 100644
index 0000000..d9c5be8
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-failure.xml
@@ -0,0 +1,32 @@
+ at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])
+ at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]
+]]>
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations
new file mode 100644
index 0000000..e135167
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.annotations
@@ -0,0 +1,66 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '1 fail, 9 pass in 1s',
+ 'summary':
+ '10 tests\u2002\u2003\u20039 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '1s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '\u205f\u20041 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n\u205f\u2004'
+ '1 files\u2004\u2002\u2003\u20031 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MQQ6AIAwEv'
+ '0I4e9CjfoYQhNgoYAqcjH8XEaG3zu52Lm7g0IEvbBoYDwligzWhjOBdxVzEUo0/iJCUys'
+ 'ncgx3OHPSFkXDQf6ERPdYJJteE7019H3ddYWIrTGXKWwsxQ71Y2CS/HxbYkAffAAAA\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-healthReport.xml\u2003[took 0s]',
+ 'title': 'UnitTests.MainClassTest.TestFailure failed',
+ 'raw_details':
+ ' Expected failure\n Expected: 30\n But was: 20\n at '
+ 'UnitTests.MainClassTest.TestFailure () [0x00000]\n at <0x00000> '
+ '<unknown method>\n at (wrapper managed-to-native) '
+ 'System.Reflection.MonoMethod:InternalInvoke (object,object[])\n '
+ 'at System.Reflection.MonoMethod.Invoke (System.Object obj, '
+ 'BindingFlags invokeAttr, System.Reflection.Binder binder, '
+ 'System.Object[] parameters, System.Globalization.CultureInfo '
+ 'culture) [0x00000]'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 10 tests, see "Raw output" for the full list of tests.',
+ 'title': '10 tests found',
+ 'raw_details':
+ 'UnitTests.MainClassTest.TestFailure\n'
+ 'UnitTests.MainClassTest.TestMethodUpdateValue\n'
+ 'UnitTests.MainClassTest.TestPropertyValue\n'
+ 'UnitTests.MainClassTest.TestPropertyValue1\n'
+ 'UnitTests.MainClassTest.TestPropertyValue2\n'
+ 'UnitTests.MainClassTest.TestPropertyValue3\n'
+ 'UnitTests.MainClassTest.TestPropertyValue4\n'
+ 'UnitTests.MainClassTest.TestPropertyValue5\n'
+ 'UnitTests.MainClassTest.TestPropertyValue6\n'
+ 'UnitTests.MainClassTest.TestPropertyValue7'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.junit-xml
new file mode 100644
index 0000000..d80dd90
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.junit-xml
@@ -0,0 +1,33 @@
+ at UnitTests.MainClassTest.TestFailure () [0x00000]
+ at <0x00000> <unknown method>
+ at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])
+ at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results
new file mode 100644
index 0000000..4aca78f
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.results
@@ -0,0 +1,158 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=10,
+ suite_skipped=0,
+ suite_failures=1,
+ suite_errors=0,
+ suite_time=1,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='UnitTests.MainClassTest',
+ tests=10,
+ skipped=0,
+ failures=1,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestMethodUpdateValue',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestFailure',
+ result='failure',
+ message=' Expected failure\n Expected: 30\n But was: 20\n',
+ content=' at UnitTests.MainClassTest.TestFailure () [0x00000]\n at '
+ '<0x00000> <unknown method>\n at (wrapper managed-to-native) '
+ 'System.Reflection.MonoMethod:InternalInvoke (object,object[])\n at '
+ 'System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags '
+ 'invokeAttr, System.Reflection.Binder binder, System.Object[] '
+ 'parameters, System.Globalization.CultureInfo culture) [0x00000]\n',
+ stdout=None,
+ stderr=None,
+ time=0.092
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue1',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue2',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue3',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue4',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue5',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue6',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-healthReport.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.MainClassTest.TestPropertyValue7',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.146
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.xml
new file mode 100644
index 0000000..4d0f8e9
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-healthReport.xml
@@ -0,0 +1,39 @@
+ at (wrapper managed-to-native) System.Reflection.MonoMethod:InternalInvoke (object,object[])
+ at System.Reflection.MonoMethod.Invoke (System.Object obj, BindingFlags invokeAttr, System.Reflection.Binder binder, System.Object[] parameters, System.Globalization.CultureInfo culture) [0x00000]
+]]>
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations
new file mode 100644
index 0000000..1ae3d02
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.annotations
@@ -0,0 +1,56 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 1 tests pass, 2 skipped in 0s',
+ 'summary':
+ '3 tests\u2002\u2003\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '0s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20032 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSw6AIAxEr'
+ '0JYu/Cz8zKGIMRGBdPCynh3KyKymzfTvlNa2AzJUXSNkBQhFJgjqgDeMbaMPIRnGr48Ud'
+ 'Q63+ZihYOLvhRWwVa/TwbRY24wus/3xFr38m9LXMkS1y7t9x0CQ06CFiWvGx5uWF7dAAA'
+ 'A\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There are 2 skipped tests, see "Raw output" for the full list of '
+ 'skipped tests.',
+ 'title': '2 skipped tests found',
+ 'raw_details':
+ 'UnitTests.OtherMainClassTest.TestIgnored\n'
+ 'UnitTests.OtherMainClassTest.TestIgnoredWithText'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 3 tests, see "Raw output" for the full list of tests.',
+ 'title': '3 tests found',
+ 'raw_details':
+ 'UnitTests.OtherMainClassTest.TestIgnored\n'
+ 'UnitTests.OtherMainClassTest.TestIgnoredWithText\n'
+ 'UnitTests.OtherMainClassTest.TestPropertyValue'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.junit-xml
new file mode 100644
index 0000000..c7f446e
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.junit-xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results
new file mode 100644
index 0000000..2ac5fad
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.results
@@ -0,0 +1,62 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=3,
+ suite_skipped=2,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=0,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='OtherMainClassTest',
+ tests=3,
+ skipped=2,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-ignored.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.OtherMainClassTest.TestIgnored',
+ result='skipped',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-ignored.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.OtherMainClassTest.TestIgnoredWithText',
+ result='skipped',
+ message='Dont do this',
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=None
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-ignored.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='UnitTests.OtherMainClassTest.TestPropertyValue',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=0.001
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.xml
new file mode 100644
index 0000000..60690e7
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-ignored.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations
new file mode 100644
index 0000000..8859c1d
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.annotations
@@ -0,0 +1,45 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 6 tests pass in 35s',
+ 'summary':
+ '6 tests\u2002\u2003\u20036 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003\u2003'
+ '35s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '1 suites\u2003\u20030 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\n1 '
+ 'files\u2004\u2002\u2003\u20030 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMwQqAIBAFf'
+ '0U8dyiiDv1MiBktpcaqp+jf20pLb2/mwRx8hk05PrCmYtwF8B9MAYUHawjbjpgef3992q'
+ 'MLUpZihZ1E/YlZwFYIhWgxGgwm9e6Z517+aw9nsYfzlrRagyeIi7lF8PMC7eTeEN4AAAA'
+ '=\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 6 tests, see "Raw output" for the full list of tests.',
+ 'title': '6 tests found',
+ 'raw_details':
+ 'testChangePassword\ntestChangePasswordFailEmptyForm\n'
+ 'testChangePasswordFailNewPasswordKO\n'
+ 'testChangePasswordFailNewPasswordNotRepeated\n'
+ 'testChangePasswordFailNewPasswordTooShort\n'
+ 'testChangePasswordFailOldPasswordKO'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.junit-xml
new file mode 100644
index 0000000..7c4df81
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.junit-xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results
new file mode 100644
index 0000000..d9946bb
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.results
@@ -0,0 +1,101 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=1,
+ suite_tests=6,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=35,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='AdministrationPasswordTest',
+ tests=6,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePasswordFailEmptyForm',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.065
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePasswordFailOldPasswordKO',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.066
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePasswordFailNewPasswordTooShort',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.049
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePasswordFailNewPasswordNotRepeated',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.05
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePasswordFailNewPasswordKO',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=5.066
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue1077.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='testChangePassword',
+ result='success',
+ message=None,
+ content=None,
+ stdout=None,
+ stderr=None,
+ time=10.1
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.xml
new file mode 100644
index 0000000..ef13506
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue1077.xml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.exception b/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.exception
new file mode 100644
index 0000000..9ef72ea
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.exception
@@ -0,0 +1 @@
+ParseError: file='files/nunit/nunit3/jenkins/NUnit-issue17521.xml', message='Char 0x0 out of allowed range, line 33, column 16 (NUnit-issue17521.xml, line 33)', line=None, column=None, exception=XMLSyntaxError('Char 0x0 out of allowed range, line 33, column 16')
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.xml
new file mode 100644
index 0000000..a91fd60
Binary files /dev/null and b/python/test/files/nunit/nunit3/jenkins/NUnit-issue17521.xml differ
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations
new file mode 100644
index 0000000..7a7fadd
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.annotations
@@ -0,0 +1,62 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'success',
+ 'output': {
+ 'title': 'All 1 tests pass, 1 skipped in 6s',
+ 'summary':
+ '1 files\u2004\u20032 suites\u2004\u2003\u20026s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '2 tests\u20031 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '1 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n2 runs\u2006\u2003'
+ '2 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '0 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/1WMMQ6AIBAEv'
+ '0KoLdTCws8YghAvIpgDKuPfPREUu505mINrMMrzkXUN4z5CSNATzBFFAGcJB0I6hHJKe/'
+ 'JRyvwxixX2n9ACDIn2FQrRYTYYbends+Q+fmrlaR1LXLek2zYIBHkxvwh+XlEX1VPdAAA'
+ 'A\n',
+ 'annotations': [
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message':
+ 'There is 1 skipped test, see "Raw output" for the name of the '
+ 'skipped test.',
+ 'title': '1 skipped test found',
+ 'raw_details': 'AddTwoNumbers'
+ },
+ {
+ 'path': '.github',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'notice',
+ 'message': 'There are 2 tests, see "Raw output" for the full list of tests.',
+ 'title': '2 tests found',
+ 'raw_details':
+ 'AddTwoNumbers\n'
+ 'SubSmokeTest("Geo","Geonw","dev1234567",System.String[])'
+ }
+ ]
+ }
+ }
+]
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.junit-xml b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.junit-xml
new file mode 100644
index 0000000..95adb0c
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.junit-xml
@@ -0,0 +1,70 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Given I have entered 50 into the calculator
+No matching step definition found for the step. Use the following code to create one:
+ [Given(@"I have entered (.*) into the calculator")]
+public void GivenIHaveEnteredIntoTheCalculator(int p0)
+{
+ ScenarioContext.Current.Pending();
+}
+
+And I have entered 70 into the calculator
+No matching step definition found for the step. Use the following code to create one:
+ [Given(@"I have entered (.*) into the calculator")]
+public void GivenIHaveEnteredIntoTheCalculator(int p0)
+{
+ ScenarioContext.Current.Pending();
+}
+
+When I press add
+No matching step definition found for the step. Use the following code to create one:
+ [When(@"I press add")]
+public void WhenIPressAdd()
+{
+ ScenarioContext.Current.Pending();
+}
+
+Then the result should be 120 on the screen
+No matching step definition found for the step. Use the following code to create one:
+ [Then(@"the result should be (.*) on the screen")]
+public void ThenTheResultShouldBeOnTheScreen(int p0)
+{
+ ScenarioContext.Current.Pending();
+}
+
+
+
+
+
+
+
+
+
+ Given I have an active Sub user Geo with username Geonw and password dev1234567
+done: LoginSteps.GivenIHaveAnActiveSubUserWithUsernameAndPassword("Geo", "Geonw", "dev1234567") (0.0s)
+And he is on Sub login page
+done: LoginSteps.GivenHeIsOnSubLoginPage() (0.6s)
+When he logs in using his credentials
+done: LoginSteps.WhenHeLogsInUsingHisCredentials() (1.8s)
+Then he should land on the Accounts homepage
+done: LoginSteps.ThenHeShouldLandOnTheAccountsHomePage() (0.3s)
+
+
+
+
+
+
+
+
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results
new file mode 100644
index 0000000..f61b5ee
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.results
@@ -0,0 +1,92 @@
+publish.unittestresults.ParsedUnitTestResults(
+ files=1,
+ errors=[],
+ suites=2,
+ suite_tests=2,
+ suite_skipped=0,
+ suite_failures=0,
+ suite_errors=0,
+ suite_time=6,
+ suite_details=[
+ publish.unittestresults.UnitTestSuite(
+ name='Automation.Features.CMFeature',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ ),
+ publish.unittestresults.UnitTestSuite(
+ name='Automation.Features.SubFeature.SubSmokeTest',
+ tests=1,
+ skipped=0,
+ failures=0,
+ errors=0,
+ stdout=None,
+ stderr=None
+ )
+ ],
+ cases=[
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue33493.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='AddTwoNumbers',
+ result='skipped',
+ message='No matching step definition found for one or more steps.\nusing '
+ 'System;\nusing TechTalk.SpecFlow;\n\nnamespace MyNamespace\n{\n '
+ '[Binding]\n public class StepDefinitions\n {\n '
+ '[Given(@"I have entered (.*) into the calculator")]\npublic void '
+ 'GivenIHaveEnteredIntoTheCalculator(int p0)\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n\n [When(@"I press '
+ 'add")]\npublic void WhenIPressAdd()\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n\n [Then(@"the result '
+ 'should be (.*) on the screen")]\npublic void '
+ 'ThenTheResultShouldBeOnTheScreen(int p0)\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n }\n}\n',
+ content=None,
+ stdout='Given I have entered 50 into the calculator\nNo matching step '
+ 'definition found for the step. Use the following code to create one:\n'
+ ' [Given(@"I have entered (.*) into the calculator")]\npublic '
+ 'void GivenIHaveEnteredIntoTheCalculator(int p0)\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n\nAnd I have entered 70 into '
+ 'the calculator\nNo matching step definition found for the step. Use '
+ 'the following code to create one:\n [Given(@"I have entered '
+ '(.*) into the calculator")]\npublic void '
+ 'GivenIHaveEnteredIntoTheCalculator(int p0)\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n\nWhen I press add\nNo '
+ 'matching step definition found for the step. Use the following code '
+ 'to create one:\n [When(@"I press add")]\npublic void '
+ 'WhenIPressAdd()\n{\n ScenarioContext.Current.Pending();\n}\n\n'
+ 'Then the result should be 120 on the screen\nNo matching step '
+ 'definition found for the step. Use the following code to create one:\n'
+ ' [Then(@"the result should be (.*) on the screen")]\npublic '
+ 'void ThenTheResultShouldBeOnTheScreen(int p0)\n{\n '
+ 'ScenarioContext.Current.Pending();\n}\n\n',
+ stderr=None,
+ time=0.186579
+ ),
+ publish.unittestresults.UnitTestCase(
+ result_file='nunit3/jenkins/NUnit-issue33493.xml',
+ test_file=None,
+ line=None,
+ class_name='',
+ test_name='SubSmokeTest("Geo","Geonw","dev1234567",System.String[])',
+ result='success',
+ message=None,
+ content=None,
+ stdout='Given I have an active Sub user Geo with username Geonw and password '
+ 'dev1234567\ndone: '
+ 'LoginSteps.GivenIHaveAnActiveSubUserWithUsernameAndPassword("Geo", '
+ '"Geonw", "dev1234567") (0.0s)\nAnd he is on Sub login page\ndone: '
+ 'LoginSteps.GivenHeIsOnSubLoginPage() (0.6s)\nWhen he logs in using '
+ 'his credentials\ndone: LoginSteps.WhenHeLogsInUsingHisCredentials() '
+ '(1.8s)\nThen he should land on the Accounts homepage\ndone: '
+ 'LoginSteps.ThenHeShouldLandOnTheAccountsHomePage() (0.3s)\n',
+ stderr=None,
+ time=6.40203
+ )
+ ]
+)
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.xml b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.xml
new file mode 100644
index 0000000..27a88c6
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue33493.xml
@@ -0,0 +1,121 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations
new file mode 100644
index 0000000..ec2363d
--- /dev/null
+++ b/python/test/files/nunit/nunit3/jenkins/NUnit-issue44527.annotations
@@ -0,0 +1,5591 @@
+[
+ {
+ 'name': 'Test Results',
+ 'head_sha': 'commit sha',
+ 'status': 'completed',
+ 'conclusion': 'failure',
+ 'output': {
+ 'title': '140 fail, 6 pass in 14m 11s',
+ 'summary':
+ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002'
+ '14m 11s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '146 tests\u2003\u205f\u2004\u205f\u20046 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '140 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004'
+ '6 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '144 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr'
+ '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I'
+ 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0'
+ 'AEAq2iOkAAAA=\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 31s]',
+ 'title':
+ 'UI_MyTask_MR_Grid_GridViewValidation(True,"chrome","/#/tasks/access'
+ '-certification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir4620_214"
+ "43}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "92de3e3859b5e9d2cb692461ba367ced\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Grid.GridValidation.UI"
+ "_MyTask_MR_Grid_GridViewValidation(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\My Requests\\Grid\\GridValidation.cs:line 29\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'All 2 runs failed: '
+ 'UI_MyTask_MR_Grid_Paging(True,"chrome","/#/tasks/access-certificati'
+ 'on/overview")',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Grid.GridValidation.UI'
+ '_MyTask_MR_Grid_Paging(Boolean excute, String browserName, String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\My Requests\\Grid\\GridValidation.cs:line 65'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTasks_MR_Paging_ShowPerPage(True,"chrome","/#/tasks/access-req'
+ 'uest/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10904_14"
+ "349}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "8d83262a43a60462d9eaed6fd8eec81c\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks._My_Requests.Paging.PagingValidatio"
+ "n.UI_MyTasks_MR_Paging_ShowPerPage(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\My Requests\\Paging\\PagingValidation.cs:line 30\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_OE_Grid_GridViewValidation(True,"chrome","/#/tasks/access'
+ '-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Grid.GridValidation'
+ '.UI_MyTask_OE_Grid_GridViewValidation(Boolean excute, String '
+ 'browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Owner Election\\Grid\\GridValidation.cs:line 28'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTasks_OE_ViewOwnersElection("/#/tasks/owners-election/overview'
+ '") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at '
+ 'MyCompanyUiSettings.Bl.OwnersElectionPage.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Owne'
+ 'rsElectionPage.cs:line 175\n at '
+ 'MyCompanyUiSettings.Bl.OwnersElectionPage..ctor(IWebDriver driver) '
+ 'in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Owne'
+ 'rsElectionPage.cs:line 145\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Paging.PagingValida'
+ 'tion.UI_MyTasks_OE_ViewOwnersElection(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Owner Election\\Paging\\PagingValidation.cs:line 40'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'All 2 runs failed: '
+ 'UI_MyTask_MR_Progress_Approve(True,"chrome","/#/tasks/access-certif'
+ 'ication/overview")',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12612_29"
+ "006}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "d099a8dab51ddac1ad57f17fd01208dc\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Progress.ProgressVa"
+ "lidation.UI_MyTask_MR_Progress_Approve(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Owner Election\\Progress\\ProgressValidation.cs:line 26\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'All 2 runs failed: '
+ 'UI_MyTask_MR_Progress_Reject(True,"chrome","/#/tasks/access-certifi'
+ 'cation/overview")',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks._Owner_Election.Progress.ProgressVa'
+ 'lidation.UI_MyTask_MR_Progress_Reject(Boolean excute, String '
+ 'browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Owner Election\\Progress\\ProgressValidation.cs:line 74'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ApproveAll_AddCommentYes_TC2689("/#/t'
+ 'asks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC'
+ '_ACIS_BulkActions_ApproveAll_AddCommentYes_TC2689(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 29'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ApproveAll_CommittedRecoredNotAffecte'
+ 'd_TC2691("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC'
+ '_ACIS_BulkActions_ApproveAll_CommittedRecoredNotAffected_TC2691(Str'
+ 'ing url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 75'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ApproveAll_WithExistingSaved_TC2690("'
+ '/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Approve_All.ApproveAll.UI_MyTask_AC'
+ '_ACIS_BulkActions_ApproveAll_WithExistingSaved_TC2690(String url) '
+ 'in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Approve All\\ApproveAll.cs:line 47'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 11s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_Browsers_Chrome_TC2692("/#/tasks/acce'
+ 'ss-certification/overview") failed',
+ 'raw_details':
+ ' Expected: True\n But was: False\nat '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Browsers.Browsers.UI_MyTask_AC_ACIS'
+ '_BulkActions_Browsers_Chrome_TC2692(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Browsers\\Browsers.cs:line 41'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 6s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ClearAll_ApproveAllNo_TC2707("/#/task'
+ 's/access-certification/overview") failed',
+ 'raw_details':
+ 'System.Exception : Base Class - Click(string xpath) method threw '
+ 'an exception : \nunknown error: Element is not '
+ 'clickable at point (80, 241). Other element would receive the '
+ 'click: ...\n (Session info: chrome=58.0.3029.110)\n '
+ '(Driver info: chromedriver=2.29.461591 '
+ '(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT '
+ '6.3.9600 x86_64) (WARNING: The server did not provide any '
+ 'stacktrace information)\nCommand duration or timeout: 50 '
+ 'milliseconds\nBuild info: version: \'3.1.0\', revision: '
+ '\'86a5d70\', time: \'2017-02-16 07:57:44 -0800\'\nSystem info: '
+ 'host: \'BRC-JENKINS2-AU\', ip: \'172.16.61.17\', os.name: '
+ '\'Windows Server 2012 R2\', os.arch: \'x86\', os.version: \'6.3\', '
+ 'java.version: \'1.8.0_66\'\nDriver info: '
+ 'org.openqa.selenium.chrome.ChromeDriver\nCapabilities '
+ '[{applicationCacheEnabled=false, rotatable=false, '
+ 'mobileEmulationEnabled=false, networkConnectionEnabled=false, '
+ 'chrome={chromedriverVersion=2.29.461591 '
+ '(62ebf098771772160f391d75e589dc567915b233), '
+ 'userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2476_115'
+ '8}, takesHeapSnapshot=true, pageLoadStrategy=normal, '
+ 'databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, '
+ 'version=58.0.3029.110, platform=WIN8_1, '
+ 'browserConnectionEnabled=false, nativeEvents=true, '
+ 'acceptSslCerts=true, locationContextEnabled=true, '
+ 'webStorageEnabled=true, browserName=chrome, takesScreenshot=true, '
+ 'javascriptEnabled=true, cssSelectorsEnabled=true, '
+ 'unexpectedAlertBehaviour=}]\nSession ID: '
+ '5cb1002259d4ed7ed523ba2e9e0cea02\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at '
+ 'MyCompanyUiSettings.Bl.Base.Click(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 323\n at MyCompanyUiSettings.Bl.Base.Click(String '
+ 'xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 330\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACI'
+ 'S_BulkActions_ClearAll_ApproveAllNo_TC2707(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 90'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ClearAll_CommittedRecoredNotAffected_'
+ 'TC2708("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACI'
+ 'S_BulkActions_ClearAll_CommittedRecoredNotAffected_TC2708(String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 102'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2705("/#/tas'
+ 'ks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACI'
+ 'S_BulkActions_RejectAll_AddCommentNo_TC2705(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 13'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2706("/#/ta'
+ 'sks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Clear_All.ClearAll.UI_MyTask_AC_ACI'
+ 'S_BulkActions_RejectAll_AddCommentYes_TC2706(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Clear All\\ClearAll.cs:line 32'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_ClearAllSelectio'
+ 'n_TC2712("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextA'
+ 'ndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_ClearAl'
+ 'lSelection_TC2712(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Examine Text and '
+ 'Layout\\ExaminTextAndLayout.cs:line 67'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2709('
+ '"/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextA'
+ 'ndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMen'
+ 'u_TC2709(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Examine Text and '
+ 'Layout\\ExaminTextAndLayout.cs:line 15'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_RejectAllSelecti'
+ 'on_TC2711("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Examine_Text_and_Layout.ExaminTextA'
+ 'ndLayout.UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_RejectA'
+ 'llSelection_TC2711(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Examine Text and '
+ 'Layout\\ExaminTextAndLayout.cs:line 50'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 35s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2713('
+ '"/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.Exception : Base Class - FindElementsOnPage(string xpath) - '
+ '1 parameter - method threw an exception : \nTimed out after 30 '
+ 'seconds\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri'
+ 'ng exceptionMessage, Exception lastException)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at '
+ 'MyCompanyUiSettings.Bl.Base.WaitForVisibleElement(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 297\n at '
+ 'MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 247\n at '
+ 'MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 253\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Load.Load.UI_MyTask_AC_ACIS_BulkAct'
+ 'ions_ExamineTextAndLayout_MainMenu_TC2713(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Load\\Load.cs:line 15'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_ExamineTextAndLayout_MainMenu_TC2714('
+ '"/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Load.Load.UI_MyTask_AC_ACIS_BulkAct'
+ 'ions_ExamineTextAndLayout_MainMenu_TC2714(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Load\\Load.cs:line 34'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 35s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentNo_TC2715("/#/tas'
+ 'ks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.Exception : Base Class - FindElementsOnPage(string xpath) - '
+ '1 parameter - method threw an exception : \nTimed out after 30 '
+ 'seconds\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri'
+ 'ng exceptionMessage, Exception lastException)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at '
+ 'MyCompanyUiSettings.Bl.Base.WaitForVisibleElement(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 297\n at '
+ 'MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 247\n at '
+ 'MyCompanyUiSettings.Bl.Base.FindElementsOnPage(String xpath) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 253\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_'
+ 'ACIS_BulkActions_RejectAll_AddCommentNo_TC2715(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 14'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_AddCommentYes_TC2716("/#/ta'
+ 'sks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_'
+ 'ACIS_BulkActions_RejectAll_AddCommentYes_TC2716(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 29'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_CommittedRecoredNotAffected'
+ '_TC2718("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_'
+ 'ACIS_BulkActions_RejectAll_CommittedRecoredNotAffected_TC2718(Strin'
+ 'g url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 75'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_RejectAll_WithExistingSaved_TC2717("/'
+ '#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Reject_All.ApproveAll.UI_MyTask_AC_'
+ 'ACIS_BulkActions_RejectAll_WithExistingSaved_TC2717(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Reject All\\RejectAll.cs:line 47'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_BulkActions_Saving_Saving_IsSynchronous_NoOtherAc'
+ 'tionCanBeTaken_2722("/#/tasks/access-certification/overview") '
+ 'failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Bulk_Acions.Saving.Saving.UI_MyTask_AC_ACIS_Bul'
+ 'kActions_Saving_Saving_IsSynchronous_NoOtherActionCanBeTaken_2722(S'
+ 'tring url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Bulk Acions\\Saving\\Saving.cs:line 27'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumApproveAll_T'
+ 'C2741("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 65 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir4700_142"
+ "37}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "0501eda8a3e393ab97da9ab3839ea770\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.Approve"
+ "AllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumApp"
+ "roveAll_TC2741(String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Approve "
+ "All\\Graph\\ApproveAllGraph.cs:line 15"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedApproveA'
+ 'll_TC11159("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.Approve'
+ 'AllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprove'
+ 'dApproveAll_TC11159(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Approve '
+ 'All\\Graph\\ApproveAllGraph.cs:line 65'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionApprove'
+ 'All_TC2744("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.Approve'
+ 'AllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActi'
+ 'onApproveAll_TC2744(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Approve '
+ 'All\\Graph\\ApproveAllGraph.cs:line 39'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectApproveAll'
+ '_TC11160("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Approve_All.Graph.Approve'
+ 'AllGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectA'
+ 'pproveAll_TC11160(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Approve '
+ 'All\\Graph\\ApproveAllGraph.cs:line 93'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumClearAll_TC2'
+ '749("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 66 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir6552_284"
+ "03}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "5646c3ae0ba7663483cda0a3894fe2a9\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllG"
+ "raph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumClearAl"
+ "l_TC2749(String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Clear "
+ "All\\Graph\\ClearAllGraph.cs:line 15"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedClearAll'
+ '_TC2750("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllG'
+ 'raph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedCle'
+ 'arAll_TC2750(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Clear '
+ 'All\\Graph\\ClearAllGraph.cs:line 46'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionClearAl'
+ 'l_TC2752("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllG'
+ 'raph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionCl'
+ 'earAll_TC2752(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Clear '
+ 'All\\Graph\\ClearAllGraph.cs:line 112'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectedClearAll'
+ '_TC2751("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Clear_All.Graph.ClearAllG'
+ 'raph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectedCle'
+ 'arAll_TC2751(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Clear '
+ 'All\\Graph\\ClearAllGraph.cs:line 79'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurther'
+ 'By_FilterBy_AllPossibleFields_TC2771("/#/tasks/access-certification'
+ '/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 61 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir32_9833}"
+ ", takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "258bbe17298009e5e47efcf485ebccd3\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_f"
+ "urther_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_"
+ "ColumnFiltering_FilterFurtherBy_FilterBy_AllPossibleFields_TC2771(S"
+ "tring url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further "
+ "by\\Filter By\\FilterBy.cs:line 106"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurther'
+ 'By_FilterBy_OneColumnOutOfManyWithAlreadyExistingFilters_TC2768("/#'
+ '/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_f'
+ 'urther_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_'
+ 'ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithAlre'
+ 'adyExistingFilters_TC2768(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further '
+ 'by\\Filter By\\FilterBy.cs:line 54'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_ColumnFiltering_FilterFurther'
+ 'By_FilterBy_OneColumnOutOfManyWithNoExistingFilters_TC2767("/#/task'
+ 's/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Column_Filtering.Filter_f'
+ 'urther_by.Filter_By.FilterBy.UI_MyTask_AC_ACIS_ChartView_ChartMenu_'
+ 'ColumnFiltering_FilterFurtherBy_FilterBy_OneColumnOutOfManyWithNoEx'
+ 'istingFilters_TC2767(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Column Filtering\\Filter further '
+ 'by\\Filter By\\FilterBy.cs:line 13'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithOnlyPen'
+ 'dingForAction_TC2753("/#/tasks/access-certification/overview") '
+ 'failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 65 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2992_316"
+ "86}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "8397ed2522698ddccb6b0aa573d920e9\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColum"
+ "n.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithOnlyP"
+ "endingForAction_TC2753(String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Entire "
+ "Column\\EntireColumn.cs:line 16"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPending'
+ 'ForActionAndUncommittedApproved_TC2754("/#/tasks/access-certificati'
+ 'on/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColum'
+ 'n.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendi'
+ 'ngForActionAndUncommittedApproved_TC2754(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Entire '
+ 'Column\\EntireColumn.cs:line 57'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPending'
+ 'ForActionAndUncommittedReject_TC2755("/#/tasks/access-certification'
+ '/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColum'
+ 'n.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColum_WithPendi'
+ 'ngForActionAndUncommittedReject_TC2755(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Entire '
+ 'Column\\EntireColumn.cs:line 83'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithPendingForActionApp'
+ 'rovedCommittedAndRejectedCommitted_TC2758("/#/tasks/access-certific'
+ 'ation/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColum'
+ 'n.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithPendingForActionA'
+ 'pprovedCommittedAndRejectedCommitted_TC2758(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Entire '
+ 'Column\\EntireColumn.cs:line 148'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithUncommittedApproved'
+ 'AndUncommittedReject_TC2756("/#/tasks/access-certification/overview'
+ '") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Entire_Column.EntireColum'
+ 'n.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_WithUncommittedApprov'
+ 'edAndUncommittedReject_TC2756(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Entire '
+ 'Column\\EntireColumn.cs:line 111'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnEntireColumn_TC79'
+ '37_TC7927("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 67 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2696_148"
+ "36}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "6a683eff25d0c058e04394158f5d2245\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_"
+ "ACIS_ChartView_ChartMenu_Menus_ColumnEntireColumn_TC7937_TC7927(Str"
+ "ing url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 58"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_ColumnPendingAcions_TC7'
+ '938("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_'
+ 'ACIS_ChartView_ChartMenu_Menus_ColumnPendingAcions_TC7938(String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 100'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_FilterFurtherBy_TC7939('
+ '"/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_'
+ 'ACIS_ChartView_ChartMenu_Menus_FilterFurtherBy_TC7939(String url) '
+ 'in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 140'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Menus_TakeActionOn_TC7936("/#'
+ '/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Menus.Menus.UI_MyTask_AC_'
+ 'ACIS_ChartView_ChartMenu_Menus_TakeActionOn_TC7936(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Menus\\Menus.cs:line 13'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumRejectAll_TC'
+ '2763("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 60 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10404_20"
+ "818}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "50f2dfc6d36fd64051d143d025dc8e53\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAl"
+ "lGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_EntireColumRejec"
+ "tAll_TC2763(String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Chart Menu\\Reject "
+ "All\\Graph\\RejectAllGraph.cs:line 15"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedRejectAl'
+ 'l_TC2765("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAl'
+ 'lGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingApprovedR'
+ 'ejectAll_TC2765(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Reject '
+ 'All\\Graph\\RejectAllGraph.cs:line 65'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForActionRejectA'
+ 'll_TC2764("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAl'
+ 'lGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingForAction'
+ 'RejectAll_TC2764(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Reject '
+ 'All\\Graph\\RejectAllGraph.cs:line 39'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectRejectAll_'
+ 'TC2766("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Chart_Menu.Reject_All.Graph.RejectAl'
+ 'lGraph.UI_MyTask_AC_ACIS_ChartView_ChartMenu_Graph_PendingRejectRej'
+ 'ectAll_TC2766(String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Chart Menu\\Reject '
+ 'All\\Graph\\RejectAllGraph.cs:line 93'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 4s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_AllColumnsAvailab'
+ 'le_TC2793("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ "System.InvalidOperationException : unknown error: Element is not "
+ "clickable at point (932, 731)\n (Session info: "
+ "chrome=58.0.3029.110)\n (Driver info: chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 61 "
+ "milliseconds\nBuild info: version: '3.1.0', revision: '86a5d70', "
+ "time: '2017-02-16 07:57:44 -0800'\nSystem info: host: "
+ "'BRC-JENKINS2-AU', ip: '172.16.61.17', os.name: 'Windows Server "
+ "2012 R2', os.arch: 'x86', os.version: '6.3', java.version: "
+ "'1.8.0_66'\nDriver info: org.openqa.selenium.chrome.ChromeDriver\n"
+ "Capabilities [{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3796_318"
+ "36}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "840df673591317f43b8304ab9db74078\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebElement.Click()\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi"
+ "cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_"
+ "AC_ACIS_ChartView_ChartMenu_LookAndFeel_AllColumnsAvailable_TC2793("
+ "String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Certification\\Access Certification Inner "
+ "Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 16"
+ }
+ ]
+ }
+ },
+ {
+ 'output': {
+ 'title': '140 fail, 6 pass in 14m 11s',
+ 'summary':
+ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002'
+ '14m 11s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '146 tests\u2003\u205f\u2004\u205f\u20046 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '140 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004'
+ '6 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '144 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr'
+ '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I'
+ 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0'
+ 'AEAq2iOkAAAA=\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_LongNameGetsTC279'
+ '5("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_'
+ 'AC_ACIS_ChartView_ChartMenu_LookAndFeel_LongNameGetsTC2795(String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 55'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TableViewTC2799("'
+ '/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_'
+ 'AC_ACIS_ChartView_ChartMenu_LookAndFeel_TableViewTC2799(String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 99'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_TextAndColorsTC27'
+ '94("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_'
+ 'AC_ACIS_ChartView_ChartMenu_LookAndFeel_TextAndColorsTC2794(String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 34'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC2796_TC2'
+ '772("/#/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_'
+ 'AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC2796_TC2772(String'
+ ' url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 75'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC7926("/#'
+ '/tasks/access-certification/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Certification.Access_Certifi'
+ 'cation_Inner_Screen.Chart_View.Look_And_Feel.LookAndFeel.UI_MyTask_'
+ 'AC_ACIS_ChartView_ChartMenu_LookAndFeel_ToolTipTC7926(String url) '
+ 'in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Certification\\Access Certification Inner '
+ 'Screen\\Chart View\\Look And Feel\\LookAndFeel.cs:line 121'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_FiltersValidation(True,"chrome","/#/tasks/access-certi'
+ 'fication/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_DataGrid_DataDisplay_TC2101(True,"chrome","/#/tasks/ac'
+ 'cess-certification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_DataGrid_Header_TC2100(True,"chrome","/#/tasks/access-'
+ 'certification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_DataGrid_Navigation_TC2099(True,"chrome","/#/tasks/acc'
+ 'ess-certification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_DataGrid_Paging_TC2102(True,"chrome","/#/tasks/access-'
+ 'certification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_Grid_Grid_Validation(True,"chrome","#/tasks/access-cer'
+ 'tification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_TwoUsersTwoApplicationsValidation(True,"chrome","/#/ta'
+ 'sks/access-certification/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 31s]',
+ 'title':
+ 'All 2 runs failed: '
+ 'UI_MyTasks_AR_Paging_ShowPerPage(True,"chrome","/#/tasks/access-req'
+ 'uest/overview")',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_Progress_Approve(True,"chrome","/#/tasks/access-certif'
+ 'ication/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AC_Progress_Reject(True,"chrome","/#/tasks/access-certifi'
+ 'cation/overview") failed',
+ 'raw_details': 'OneTimeSetUp: No suitable constructor was found'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_AR_Filters_FilterLayout(True,"chrome","/#/tasks/access-ce'
+ 'rtification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 15 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir11804_16"
+ "895}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "29b4b9836d0675d3828a94e2f11cf9d7\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Filters.FiltersValid"
+ "ation.UI_MyTask_AR_Filters_FilterLayout(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Request\\Filters\\FiltersValidation.cs:line 29\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AR_Filters_FiltersFunctionality(True,"chrome","/#/tasks/a'
+ 'ccess-request/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Filters.FiltersValid'
+ 'ation.UI_MyTask_AR_Filters_FiltersFunctionality(Boolean excute, '
+ 'String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Request\\Filters\\FiltersValidation.cs:line 83'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_AR_Grid_FilterVAlidates(True,"chrome","/#/tasks/access-ce'
+ 'rtification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 15 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12972_27"
+ "801}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "fca88dd0490c464a5ded2f16849929d8\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Grid.GridValidation."
+ "UI_MyTask_AR_Grid_FilterVAlidates(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Request\\Grid\\GridValidation.cs:line 29\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AR_Grid_Paging(True,"chrome","/#/tasks/access-certificati'
+ 'on/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Grid.GridValidation.'
+ 'UI_MyTask_AR_Grid_Paging(Boolean excute, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Request\\Grid\\GridValidation.cs:line 65'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_AR_Progress_Approve(True,"chrome","/#/tasks/access-certif'
+ 'ication/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3688_215"
+ "57}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "fc2e027b336637b143a0098139997621\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Progress.ProgressVal"
+ "idation.UI_MyTask_AR_Progress_Approve(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Access Request\\Progress\\ProgressValidation.cs:line 32\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_AR_Progress_Reject(True,"chrome","/#/tasks/access-certifi'
+ 'cation/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Access_Request.Progress.ProgressVal'
+ 'idation.UI_MyTask_AR_Progress_Reject(Boolean excute, String '
+ 'browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Access Request\\Progress\\ProgressValidation.cs:line 80'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 31s]',
+ 'title':
+ 'UI_MyTask_CC_Filters_FilterVAlidates(True,"chrome","/#/tasks/access'
+ '-certification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir13304_30"
+ "088}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "e6e1a454eceffe04daec2df3121843c6\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Grid.GridValidat"
+ "ion.UI_MyTask_CC_Filters_FilterVAlidates(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Compliance Control\\Grid\\GridValidation.cs:line 30\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_CC_Grid_Paging(True,"chrome","/#/tasks/access-certificati'
+ 'on/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Grid.GridValidat'
+ 'ion.UI_MyTask_CC_Grid_Paging(Boolean excute, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Compliance Control\\Grid\\GridValidation.cs:line 66'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTasks_CC_Paging_ShowPerPage(True,"chrome","/#/tasks/access-req'
+ 'uest/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir6532_293"
+ "46}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "b5311e179a7c4fac0e8285b86e566664\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Paging.PagingVal"
+ "idation.UI_MyTasks_CC_Paging_ShowPerPage(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Compliance Control\\Paging\\PagingValidation.cs:line 24\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_CC_Progress_Approve(True,"chrome","/#/tasks/access-certif'
+ 'ication/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir12668_24"
+ "175}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "1a60859e82be5a9504866d8d9e6b21ba\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Progress.Progres"
+ "sValidation.UI_MyTask_CC_Progress_Approve(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Compliance Control\\Progress\\ProgressValidation.cs:line "
+ "27\n--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_CC_Progress_Reject(True,"chrome","/#/tasks/access-certifi'
+ 'cation/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks.Compliance_Control.Progress.Progres'
+ 'sValidation.UI_MyTask_CC_Progress_Reject(Boolean excute, String '
+ 'browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Compliance Control\\Progress\\ProgressValidation.cs:line '
+ '76'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_CC_Filters_FilterLayout(True,"chrome","/#/tasks/access-ce'
+ 'rtification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 16 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir10360_63"
+ "06}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "68b0320c39a561808d45f7b1bd2ce18e\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks_Compliance_Control.Filters.FiltersV"
+ "alidation.UI_MyTask_CC_Filters_FilterLayout(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\Compliance Control\\Filters\\FiltersValidation.cs:line 30\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_CC_Filters_FiltersFunctionality(True,"chrome","/#/tasks/a'
+ 'ccess-request/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks_Compliance_Control.Filters.FiltersV'
+ 'alidation.UI_MyTask_CC_Filters_FiltersFunctionality(Boolean '
+ 'excute, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\Compliance Control\\Filters\\FiltersValidation.cs:line 69'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UI_MyTask_MR_Filters_FilterLayout(True,"chrome","/#/tasks/access-ce'
+ 'rtification/overview") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2736_229"
+ "08}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "52ab857fbeb80383ec0a4311504f7b8e\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.My_Tasks_My_Requests.Filters.FiltersValidati"
+ "on.UI_MyTask_MR_Filters_FilterLayout(Boolean excute, String "
+ "browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My"
+ " Tasks\\My Requests\\Filters\\FiltersValidation.cs:line 22\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UI_MyTask_MR_Filters_FiltersFunctionality(True,"chrome","/#/tasks/a'
+ 'ccess-request/overview") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.My_Tasks_My_Requests.Filters.FiltersValidati'
+ 'on.UI_MyTask_MR_Filters_FiltersFunctionality(Boolean excute, '
+ 'String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\My'
+ ' Tasks\\My Requests\\Filters\\FiltersValidation.cs:line 78'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UIAlertExclusionAddAccountsFromSearch_TC7466(True,"en","1","chrome"'
+ ',"/#/settings/general-settings") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir3016_202"
+ "27}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "c9411ed622920bbdad53147bc36fd09b\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI"
+ "AlertExclusionAddAccountsFromSearch_TC7466(Boolean excute, String "
+ "language, String itteration, String browserName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett"
+ "ings\\Alert Exclusions\\AlertExclusions.cs:line 76\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionBulkActionsCoverage_TC7465(True,"en","1","chrome","'
+ '/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionBulkActionsCoverage_TC7465(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 111'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionBulkUploadCoverage_TC7467_TC7468(True,"en","1","chr'
+ 'ome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionBulkUploadCoverage_TC7467_TC7468(Boolean excute, '
+ 'String language, String itteration, String browserName, String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 575'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionBulkUploadDownloadSampleFile_TC7464(True,"en","1","'
+ 'chrome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionBulkUploadDownloadSampleFile_TC7464(Boolean excute, '
+ 'String language, String itteration, String browserName, String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 155'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionColumns_TC7474(True,"en","1","chrome","/#/settings/'
+ 'general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionColumns_TC7474(Boolean excute, String language, '
+ 'String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 204'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionGridCoverage_TC7465(True,"en","1","chrome","/#/sett'
+ 'ings/general-settings","u0g793,u1g1,u1g792,u1g802,u2g399,u2g8...") '
+ 'failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionGridCoverage_TC7465(Boolean excute, String language, '
+ 'String itteration, String browserName, String url, String names) '
+ 'in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 532'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionLoadSameAccountTwice_TC7473(True,"en","1","chrome",'
+ '"/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionLoadSameAccountTwice_TC7473(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 301'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionNonCsvFormat_TC7472(True,"en","1","chrome","/#/sett'
+ 'ings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionNonCsvFormat_TC7472(Boolean excute, String language, '
+ 'String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 349'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionPaginationCoverage_TC7471(True,"en","1","chrome","/'
+ '#/settings/general-settings","u0g791,u0g801,u1g791,u1g801,u2g791,u2'
+ '...") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionPaginationCoverage_TC7471(Boolean excute, String '
+ 'language, String itteration, String browserName, String url, '
+ 'String names) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 32'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionRemoveAccounts_TC7470(True,"en","1","chrome","/#/se'
+ 'ttings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionRemoveAccounts_TC7470(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 397'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionScreenOverviewLook_TC7465(True,"en","1","chrome","/'
+ '#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionScreenOverviewLook_TC7465(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 248'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionSearchCurrentExcludedAccounts_TC7475(True,"en","1",'
+ '"chrome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionSearchCurrentExcludedAccounts_TC7475(Boolean excute, '
+ 'String language, String itteration, String browserName, String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 488'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIAlertExclusionShowPerPageCoverage_TC7465(True,"en","1","chrome","'
+ '/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Alert_Exclusions.AlertExclusions.UI'
+ 'AlertExclusionShowPerPageCoverage_TC7465(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Alert Exclusions\\AlertExclusions.cs:line 447'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UIDataOwnerExclusionAddAccountsFromSearch_TC3411(True,"en","1","chr'
+ 'ome","/#/settings/general-settings") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir9916_128"
+ "85}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "d3eacb9d6fac9a67fa47aa82158da43c\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl"
+ "usions.UIDataOwnerExclusionAddAccountsFromSearch_TC3411(Boolean "
+ "excute, String language, String itteration, String browserName, "
+ "String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett"
+ "ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 142\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionBulkActionsCoverage_TC7554_TC3415(True,"en","1"'
+ ',"chrome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionBulkActionsCoverage_TC7554_TC3415(Boolea'
+ 'n excute, String language, String itteration, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 180'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionBulkUploadCoverage_TC3412_TC3413(True,"en","1",'
+ '"chrome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionBulkUploadCoverage_TC3412_TC3413(Boolean'
+ ' excute, String language, String itteration, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 78'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionColumns_TC3419(True,"en","1","chrome","/#/setti'
+ 'ngs/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionColumns_TC3419(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 223'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionGridCoverage_TC7554(True,"en","1","chrome","/#/'
+ 'settings/general-settings","u0g793,u1g1,u1g792,u1g802,u2g399,u2g8..'
+ '.") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionGridCoverage_TC7554(Boolean excute, '
+ 'String language, String itteration, String browserName, String '
+ 'url, String names) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 267'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionLoadSameAccountTwice_TC3418(True,"en","1","chro'
+ 'me","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionLoadSameAccountTwice_TC3418(Boolean '
+ 'excute, String language, String itteration, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 309'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionNonCsvFormat_TC3417(True,"en","1","chrome","/#/'
+ 'settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionNonCsvFormat_TC3417(Boolean excute, '
+ 'String language, String itteration, String browserName, String '
+ 'url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 31'
+ }
+ ]
+ }
+ },
+ {
+ 'output': {
+ 'title': '140 fail, 6 pass in 14m 11s',
+ 'summary':
+ '\u205f\u2004\u205f\u20041 files\u2004\u2003155 suites\u2004\u2003\u2002'
+ '14m 11s '
+ '[:stopwatch:](https://github.com/step-security/publish-unit-test-resu'
+ 'lt-action/blob/VERSION/README.md#the-symbols "duration of all tests")\n'
+ '146 tests\u2003\u205f\u2004\u205f\u20046 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '140 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n150 runs\u2006\u2003\u205f\u2004\u205f\u2004'
+ '6 '
+ '[:heavy_check_mark:](https://github.com/step-security/publish-unit-te'
+ 'st-result-action/blob/VERSION/README.md#the-symbols "passed tests")\u2003'
+ '0 '
+ '[:zzz:](https://github.com/step-security/publish-unit-test-result-act'
+ 'ion/blob/VERSION/README.md#the-symbols "skipped / disabled tests")\u2003'
+ '144 '
+ '[:x:](https://github.com/step-security/publish-unit-test-result-actio'
+ 'n/blob/VERSION/README.md#the-symbols "failed tests")\n\nResults for '
+ 'commit commit s.\n\n'
+ '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02NSw6AIAwFr'
+ '0JYu9BEjPEyhqDExg+mwMp4dysfZdeZ175eXMM2Wz6wpmLcenARhCCcPEoH5iDRizen0I'
+ 'W47TKN1itFqhArnCTqT2gJWzj61YxoMC2hP+LLDGVl5L8x8FfYZlP2KbPv4AjSxOwi+f0'
+ 'AEAq2iOkAAAA=\n',
+ 'annotations': [
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionPaginationCoverage_TC7554_TC3415(True,"en","1",'
+ '"chrome","/#/settings/general-settings","u0g106,u0g115,u0g124,u0g13'
+ '3,u0g142,u0...") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionPaginationCoverage_TC7554_TC3415(Boolean'
+ ' excute, String language, String itteration, String browserName, '
+ 'String url, String names) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 355'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionSearchCurrentExcludedAccounts_TC3420(True,"en",'
+ '"1","chrome","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionSearchCurrentExcludedAccounts_TC3420(Boo'
+ 'lean excute, String language, String itteration, String '
+ 'browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 398'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIDataOwnerExclusionShowPerPageCoverage_TC7554(True,"en","1","chrom'
+ 'e","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Data_Owner_Exclusions.DataOwnerExcl'
+ 'usions.UIDataOwnerExclusionShowPerPageCoverage_TC7554(Boolean '
+ 'excute, String language, String itteration, String browserName, '
+ 'String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Data Owner Exclusions\\DataOwnerExclusions.cs:line 438'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 30s]',
+ 'title':
+ 'UIGeneralSettingsAllowPhysicalPath_TC10766(True,"1","abcd","chrome"'
+ ',"en","user,ra_user","crowdSource,whiteops","user","is_administrato'
+ 'r","/#/settings/general-settings") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 0 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir7348_165"
+ "22}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "a9460966896b2f67901d0c200c612026\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settin"
+ "gs.UIGeneralSettingsAllowPhysicalPath_TC10766(Boolean excute, "
+ "String itteration, String account, String browserName, String "
+ "language, String dbTables, String dbSchema, String tableName, "
+ "String columnName, String url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett"
+ "ings\\Logical Mapped Path\\Settings\\Settings.cs:line 266\n"
+ "--NoSuchElementException\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIGeneralSettingsDataDisplay_TC10898(True,"1","abcd","chrome","en",'
+ '"user,ra_user","crowdSource,whiteops","user","is_administrator","/#'
+ '/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settin'
+ 'gs.UIGeneralSettingsDataDisplay_TC10898(Boolean excute, String '
+ 'itteration, String account, String browserName, String language, '
+ 'String dbTables, String dbSchema, String tableName, String '
+ 'columnName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Logical Mapped Path\\Settings\\Settings.cs:line 75'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIGeneralSettingsExcludeAdministrator_TC10765(True,"1","abcd","chro'
+ 'me","en","user,ra_user","crowdSource,whiteops","user","is_administr'
+ 'ator","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settin'
+ 'gs.UIGeneralSettingsExcludeAdministrator_TC10765(Boolean excute, '
+ 'String itteration, String account, String browserName, String '
+ 'language, String dbTables, String dbSchema, String tableName, '
+ 'String columnName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Logical Mapped Path\\Settings\\Settings.cs:line 192'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIGeneralSettingsNavigation_TC10897(True,"1","abcd","chrome","en","'
+ 'user,ra_user","crowdSource,whiteops","user","is_administrator","/#/'
+ 'settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settin'
+ 'gs.UIGeneralSettingsNavigation_TC10897(Boolean excute, String '
+ 'itteration, String account, String browserName, String language, '
+ 'String dbTables, String dbSchema, String tableName, String '
+ 'columnName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Logical Mapped Path\\Settings\\Settings.cs:line 36'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIGeneralSettingsTranslatePhysicalPath_TC10764(True,"1","abcd","chr'
+ 'ome","en","user,ra_user","crowdSource,whiteops","user","is_administ'
+ 'rator","/#/settings/general-settings") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Logical_Mapped_Path.Settings.Settin'
+ 'gs.UIGeneralSettingsTranslatePhysicalPath_TC10764(Boolean excute, '
+ 'String itteration, String account, String browserName, String '
+ 'language, String dbTables, String dbSchema, String tableName, '
+ 'String columnName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Logical Mapped Path\\Settings\\Settings.cs:line 119'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 31s]',
+ 'title':
+ 'UIMessageTemplatesCompanyInformationCoverage_TC3422_TC7534(True,"en'
+ '","1","chrome","/#/settings/crowd-messages/welcome-me...") failed',
+ 'raw_details':
+ "OpenQA.Selenium.WebDriverTimeoutException : Timed out after 30 "
+ "seconds\n ----> OpenQA.Selenium.NoSuchElementException : no such "
+ "element: Unable to locate element: "
+ "{\"method\":\"xpath\",\"selector\":\"//span[@translate='_Loading_']\"}"
+ "\n (Session info: chrome=58.0.3029.110)\n (Driver info: "
+ "chromedriver=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233),platform=Windows NT "
+ "6.3.9600 x86_64) (WARNING: The server did not provide any "
+ "stacktrace information)\nCommand duration or timeout: 16 "
+ "milliseconds\nFor documentation on this error, please visit: "
+ "http://seleniumhq.org/exceptions/no_such_element.html\nBuild info: "
+ "version: '3.1.0', revision: '86a5d70', time: '2017-02-16 07:57:44 "
+ "-0800'\nSystem info: host: 'BRC-JENKINS2-AU', ip: '172.16.61.17', "
+ "os.name: 'Windows Server 2012 R2', os.arch: 'x86', os.version: "
+ "'6.3', java.version: '1.8.0_66'\nDriver info: "
+ "org.openqa.selenium.chrome.ChromeDriver\nCapabilities "
+ "[{applicationCacheEnabled=false, rotatable=false, "
+ "mobileEmulationEnabled=false, networkConnectionEnabled=false, "
+ "chrome={chromedriverVersion=2.29.461591 "
+ "(62ebf098771772160f391d75e589dc567915b233), "
+ "userDataDir=C:\\Users\\BUILD-~1\\AppData\\Local\\Temp\\scoped_dir2232_223"
+ "98}, takesHeapSnapshot=true, pageLoadStrategy=normal, "
+ "databaseEnabled=false, handlesAlerts=true, hasTouchScreen=false, "
+ "version=58.0.3029.110, platform=WIN8_1, "
+ "browserConnectionEnabled=false, nativeEvents=true, "
+ "acceptSslCerts=true, locationContextEnabled=true, "
+ "webStorageEnabled=true, browserName=chrome, takesScreenshot=true, "
+ "javascriptEnabled=true, cssSelectorsEnabled=true, "
+ "unexpectedAlertBehaviour=}]\nSession ID: "
+ "882c55bf9c675e183d7269fae3076ce9\n*** Element info: {Using=xpath, "
+ "value=//span[@translate='_Loading_']}\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.ThrowTimeoutException(Stri"
+ "ng exceptionMessage, Exception lastException)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base"
+ ".cs:line 537\n at "
+ "MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTem"
+ "platesCompanyInformationCoverage_TC3422_TC7534(Boolean excute, "
+ "String language, String itteration, String browserName, String "
+ "url) in "
+ "C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett"
+ "ings\\Messages\\MessagesTests.cs:line 33\n--NoSuchElementException\n"
+ " at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon"
+ "se errorResponse)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String "
+ "driverCommandToExecute, Dictionary`2 parameters)\n at "
+ "OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String "
+ "mechanism, String value)\n at "
+ "OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<"
+ "ElementIsVisible>b__12(IWebDriver driver)\n at "
+ "OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 "
+ "condition)"
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIMessageTemplatesCompanyInformationFunctionality_TC3422_TC7534(Tru'
+ 'e,"en","1","chrome","/#/settings/crowd-messages/welcome-me...","goo'
+ 'gle") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTem'
+ 'platesCompanyInformationFunctionality_TC3422_TC7534(Boolean '
+ 'excute, String language, String itteration, String browserName, '
+ 'String url, String companyName) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Messages\\MessagesTests.cs:line 79'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIMessageTemplatesCumulativeReminderScheduling_TC3426(True,"en","1"'
+ ',"chrome","/#/settings/crowd-messages/welcome-me...") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTem'
+ 'platesCumulativeReminderScheduling_TC3426(Boolean excute, String '
+ 'language, String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Messages\\MessagesTests.cs:line 116'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIMessageTemplatesDiscardChanges_TC3425(True,"en","1","chrome","/#/'
+ 'settings/crowd-messages/welcome-me...") failed',
+ 'raw_details':
+ 'System.InvalidOperationException : Session [(null externalkey)] '
+ 'not available and is not among the last 1000 terminated sessions.\n'
+ 'Active sessions are[]\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.UnpackAndThrowOnError(Respon'
+ 'se errorResponse)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.Execute(String '
+ 'driverCommandToExecute, Dictionary`2 parameters)\n at '
+ 'OpenQA.Selenium.Remote.RemoteWebDriver.FindElement(String '
+ 'mechanism, String value)\n at '
+ 'OpenQA.Selenium.Support.UI.ExpectedConditions.<>c__DisplayClass13.<'
+ 'ElementIsVisible>b__12(IWebDriver driver)\n at '
+ 'OpenQA.Selenium.Support.UI.DefaultWait`1.Until[TResult](Func`2 '
+ 'condition)\n at MyCompanyUiSettings.Bl.Base.waitToLoad() in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Bl\\Base'
+ '.cs:line 537\n at '
+ 'MyCompanyUiSettings.Tl.Settings.Messages.MessagesTests.UIMessageTem'
+ 'platesDiscardChanges_TC3425(Boolean excute, String language, '
+ 'String itteration, String browserName, String url) in '
+ 'C:\\branches\\1\\main-branch\\Automation\\UI\\MyCompanyUiSettings\\Tl\\Sett'
+ 'ings\\Messages\\MessagesTests.cs:line 172'
+ },
+ {
+ 'path': '/',
+ 'start_line': 0,
+ 'end_line': 0,
+ 'annotation_level': 'warning',
+ 'message': 'nunit3/jenkins/NUnit-issue44527.xml\u2003[took 0s]',
+ 'title':
+ 'UIMessageTemplatesHtmlEditor_TC3424(True,"en","1","chrome","/#/sett'
+ 'ings/crowd-messages/welcome-me...","