diff --git a/.clang-tidy b/.clang-tidy index 4631be3b22..bfd73abff3 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,102 +1,59 @@ --- -Checks: 'clang-analyzer-*, performance-*,readability-*,-readability-else-after-return,-readability-identifier-naming,-readability-container-size-empty,-readability-redundant-declaration,modernize-*,-modernize-avoid-bind,-modernize-loop-convert,-modernize-use-using,-modernize-deprecated-headers, llvm-namespace-comment,cppcoreguidelines-pro-*,-cppcoreguidelines-pro-type-reinterpret-cast,-cppcoreguidelines-pro-type-vararg,-cppcoreguidelines-pro-type-const-cast,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-cppcoreguidelines-pro-bounds-constant-array-index,cert-*,-cert-err58-cpp,-cert-env33-c,-cert-err09-cpp,-cert-err60-cpp,-cert-err61-cpp, -cert-err61-cpp, bugprone-*, google-*,-google-runtime-references,-google-runtime-member-string-references, -google-explicit-constructor, -google-readability-todo, misc-*, -misc-unused-parameters, -misc-macro-parentheses, -misc-throw-by-value-catch-by-reference, -misc-suspicious-enum-usage' +Checks: >- + boost-*, + bugprone-*, + cert-*,-cert-err58-cpp,-cert-env33-c,-cert-err60-cpp, + clang-analyzer-*, + cppcoreguidelines-*,-cppcoreguidelines-avoid-magic-numbers,-cppcoreguidelines-init-variables,-cppcoreguidelines-owning-memory,-cppcoreguidelines-non-private-member-variables-in-classes,-cppcoreguidelines-macro-usage, + cppcoreguidelines-pro-*,-cppcoreguidelines-pro-type-reinterpret-cast,-cppcoreguidelines-pro-type-vararg,-cppcoreguidelines-pro-type-const-cast,-cppcoreguidelines-pro-bounds-array-to-pointer-decay, + google-*,-google-runtime-references,-google-readability-todo, + hicpp-*,-hicpp-no-array-decay,-hicpp-no-assembler,-hicpp-signed-bitwise,-hicpp-vararg, + llvm-namespace-comment, + misc-*,-misc-non-private-member-variables-in-classes, + modernize-*,-modernize-avoid-bind,-modernize-loop-convert,-modernize-use-trailing-return-type, + performance-*, + 
readability-*,-readability-else-after-return,-readability-identifier-naming,-readability-magic-numbers,-readability-function-cognitive-complexity, + -modernize-use-nodiscard, + WarningsAsErrors: '*' AnalyzeTemporaryDtors: false -CheckOptions: +CheckOptions: - key: cert-dcl59-cpp.HeaderFileExtensions - value: h,hh,hpp,hxx + value: h - key: cert-err09-cpp.CheckThrowTemporaries value: '1' - key: cert-err61-cpp.CheckThrowTemporaries value: '1' - - key: cert-oop11-cpp.IncludeStyle - value: llvm - - key: cppcoreguidelines-pro-bounds-constant-array-index.GslHeader - value: '' - - key: cppcoreguidelines-pro-bounds-constant-array-index.IncludeStyle - value: '0' - key: cppcoreguidelines-pro-type-member-init.IgnoreArrays value: '0' - key: google-build-namespaces.HeaderFileExtensions - value: h,hh,hpp,hxx + value: h - key: google-global-names-in-headers.HeaderFileExtensions value: h - key: google-readability-braces-around-statements.ShortStatementLines value: '1' - key: google-readability-function-size.BranchThreshold - value: '4294967295' + value: '100' - key: google-readability-function-size.LineThreshold - value: '4294967295' + value: '1000' - key: google-readability-function-size.StatementThreshold value: '800' - key: google-readability-namespace-comments.ShortNamespaceLines value: '10' - key: google-readability-namespace-comments.SpacesBeforeComments value: '2' - - key: google-runtime-int.SignedTypePrefix - value: int - - key: google-runtime-int.TypeSuffix - value: '' - - key: google-runtime-int.UnsignedTypePrefix - value: uint - - key: google-runtime-references.WhiteListTypes - value: '' - key: llvm-header-guard.HeaderFileExtensions - value: ',h,hh,hpp,hxx' + value: h - key: llvm-namespace-comment.ShortNamespaceLines - value: '1' + value: '10' - key: llvm-namespace-comment.SpacesBeforeComments - value: '1' - - key: misc-argument-comment.StrictMode - value: '0' - - key: misc-assert-side-effect.AssertMacros - value: assert - - key: 
misc-assert-side-effect.CheckFunctionCalls - value: '0' - - key: misc-dangling-handle.HandleClasses - value: 'std::basic_string_view;std::experimental::basic_string_view' + value: '2' - key: misc-definitions-in-headers.HeaderFileExtensions - value: ',h,hh,hpp,hxx' + value: h - key: misc-definitions-in-headers.UseHeaderFileExtension value: '1' - - key: misc-misplaced-widening-cast.CheckImplicitCasts - value: '1' - - key: misc-move-constructor-init.IncludeStyle - value: llvm - - key: misc-sizeof-expression.WarnOnSizeOfCompareToConstant - value: '1' - - key: misc-sizeof-expression.WarnOnSizeOfConstant - value: '1' - - key: misc-sizeof-expression.WarnOnSizeOfThis - value: '1' - - key: misc-string-constructor.LargeLengthThreshold - value: '8388608' - - key: misc-string-constructor.WarnOnLargeLength - value: '1' - - key: misc-suspicious-enum-usage.StrictMode - value: '0' - - key: misc-suspicious-missing-comma.MaxConcatenatedTokens - value: '5' - - key: misc-suspicious-missing-comma.RatioThreshold - value: '0.200000' - - key: misc-suspicious-missing-comma.SizeThreshold - value: '5' - - key: misc-suspicious-string-compare.StringCompareLikeFunctions - value: '' - - key: misc-suspicious-string-compare.WarnOnImplicitComparison - value: '1' - - key: misc-suspicious-string-compare.WarnOnLogicalNotComparison - value: '0' - key: misc-throw-by-value-catch-by-reference.CheckThrowTemporaries value: '1' - - key: modernize-loop-convert.MaxCopySize - value: '16' - - key: modernize-loop-convert.MinConfidence - value: reasonable - - key: modernize-loop-convert.NamingStyle - value: CamelCase - - key: modernize-replace-auto-ptr.IncludeStyle - value: llvm - key: modernize-use-auto.RemoveStars value: '0' - key: modernize-use-default-member-init.UseAssignment @@ -105,26 +62,14 @@ CheckOptions: value: '::std::vector;::std::list;::std::deque' - key: modernize-use-emplace.SmartPointers value: '::std::shared_ptr;::std::unique_ptr;::std::auto_ptr;::std::weak_ptr' - - key: 
modernize-use-nullptr.NullMacros - value: 'NULL' - - key: modernize-use-transparent-functors.SafeMode - value: '0' - - key: performance-faster-string-find.StringLikeClasses - value: 'std::basic_string' - - key: performance-for-range-copy.WarnOnAllAutoCopies - value: '0' - key: performance-inefficient-string-concatenation.StrictMode value: '0' - - key: performance-type-promotion-in-math-fn.IncludeStyle - value: llvm - - key: performance-unnecessary-value-param.IncludeStyle - value: llvm - key: readability-braces-around-statements.ShortStatementLines - value: '0' + value: '1' - key: readability-function-size.BranchThreshold - value: '4294967295' + value: '100' - key: readability-function-size.LineThreshold - value: '4294967295' + value: '1000' - key: readability-function-size.StatementThreshold value: '800' - key: readability-identifier-naming.AbstractClassCase @@ -393,9 +338,9 @@ CheckOptions: value: '' - key: readability-identifier-naming.VirtualMethodSuffix value: '' - - key: readability-implicit-bool-cast.AllowConditionalIntegerCasts + - key: readability-implicit-bool-conversion.AllowConditionalIntegerCasts value: '0' - - key: readability-implicit-bool-cast.AllowConditionalPointerCasts + - key: readability-implicit-bool-conversion.AllowConditionalPointerCasts value: '0' - key: readability-simplify-boolean-expr.ChainedConditionalAssignment value: '0' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..01f058dd9e --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,124 @@ +name: Aktualizr CI +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] +env: + CCACHE_DIR: ${{ github.workspace }}/.ccache +jobs: + coverage: + name: Coverage on Ubuntu Bionic + runs-on: ubuntu-latest + env: + DOCKER_TAG: docker.pkg.github.com/uptane/aktualizr/aktualizr-ci:bionic-master + DOCKERFILE: docker/Dockerfile.ubuntu.bionic + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + DARGS: >- + -eCCACHE_DIR + 
-eCODECOV_TOKEN + -eTEST_CMAKE_BUILD_TYPE=Valgrind + -eTEST_WITH_COVERAGE=1 + -eTEST_WITH_P11=1 + -eTEST_WITH_FAULT_INJECTION=1 + -eTEST_TESTSUITE_EXCLUDE=credentials + -eTEST_SOTA_PACKED_CREDENTIALS=dummy-credentials + steps: + - uses: actions/checkout@master + with: + submodules: recursive + - run: git fetch --prune --unshallow + - name: Docker login + if: github.token + run: echo ${{ github.token }} | docker login docker.pkg.github.com -u uptane --password-stdin + - name: Docker build + run: | + docker pull "$DOCKER_TAG" || true + docker build --cache-from "$DOCKER_TAG" --pull -t "$DOCKER_TAG" -f "$DOCKERFILE" . + - uses: actions/cache@v1.1.0 + with: + path: ${{ github.workspace }}/.ccache + key: ubuntu-bionic-${{ github.run_id }} + restore-keys: | + ubuntu-bionic-${{ github.run_id }} + ubuntu-bionic- + - name: Test + run: docker run -v "$PWD:$PWD" -w "$PWD" $DARGS -t "$DOCKER_TAG" ./scripts/test.sh + + nop11: + name: Tests without p11 support on Ubuntu Bionic + runs-on: ubuntu-latest + env: + DOCKER_TAG: docker.pkg.github.com/uptane/aktualizr/aktualizr-ci:bionic-master + DOCKERFILE: docker/Dockerfile.ubuntu.bionic + DARGS: >- + -eCCACHE_DIR + -eTEST_CMAKE_BUILD_TYPE=Debug + -eTEST_WITH_P11=0 + -eTEST_WITH_TESTSUITE=0 + steps: + - uses: actions/checkout@master + with: + submodules: recursive + - run: git fetch --prune --unshallow + - name: Docker login + if: github.token + run: echo ${{ github.token }} | docker login docker.pkg.github.com -u uptane --password-stdin + - name: Docker build + run: | + docker pull "$DOCKER_TAG" || true + docker build --cache-from "$DOCKER_TAG" --pull -t "$DOCKER_TAG" -f "$DOCKERFILE" . 
+ - uses: actions/cache@v1.1.0 + with: + path: ${{ github.workspace }}/.ccache + key: ubuntu-bionic-${{ github.run_id }} + restore-keys: | + ubuntu-bionic-${{ github.run_id }} + ubuntu-bionic- + - name: Test + run: docker run -v "$PWD:$PWD" -w "$PWD" $DARGS -t "$DOCKER_TAG" ./scripts/test.sh + + static-checks: + name: Static checks on Ubuntu Focal + runs-on: ubuntu-latest + env: + DOCKER_TAG: docker.pkg.github.com/uptane/aktualizr/aktualizr-ci:ubuntu-focal-master + DOCKERFILE: docker/Dockerfile.ubuntu.focal + DARGS: >- + -eCCACHE_DIR + -eTEST_CC=clang + -eTEST_CMAKE_BUILD_TYPE=Valgrind + -eTEST_TESTSUITE_ONLY=crypto + -eTEST_WITH_STATICTESTS=1 + -eTEST_WITH_DOCS=1 + steps: + - uses: actions/checkout@master + with: + submodules: recursive + - run: git fetch --prune --unshallow + - name: Docker login + if: github.token + run: echo ${{ github.token }} | docker login docker.pkg.github.com -u uptane --password-stdin + - name: Docker build + run: | + docker pull "$DOCKER_TAG" || true + docker build --cache-from "$DOCKER_TAG" --pull -t "$DOCKER_TAG" -f "$DOCKERFILE" . 
+ - uses: actions/cache@v1.1.0 + with: + path: ${{ github.workspace }}/.ccache + key: ubuntu-focal-${{ github.run_id }} + restore-keys: | + ubuntu-focal-${{ github.run_id }} + ubuntu-focal- + - name: Test + run: docker run -v "$PWD:$PWD" -w "$PWD" $DARGS -t "$DOCKER_TAG" ./scripts/test.sh + + shellcheck: + name: Shellcheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@master + with: + ignore_paths: third_party diff --git a/.github/workflows/update-docker.yml b/.github/workflows/update-docker.yml new file mode 100644 index 0000000000..578e86edc0 --- /dev/null +++ b/.github/workflows/update-docker.yml @@ -0,0 +1,36 @@ +name: Aktualizr CI docker images update +on: + push: + branches: [ master ] +jobs: + update-bionic: + name: Update Ubuntu Bionic Image + runs-on: ubuntu-latest + env: + DOCKER_TAG: docker.pkg.github.com/uptane/aktualizr/aktualizr-ci:bionic-master + DOCKERFILE: docker/Dockerfile.ubuntu.bionic + steps: + - uses: actions/checkout@master + - name: Docker login + run: echo ${{ github.token }} | docker login docker.pkg.github.com -u uptane --password-stdin + - name: Docker build and push + run: | + docker pull "$DOCKER_TAG" || true + docker build --cache-from "$DOCKER_TAG" --pull -t "$DOCKER_TAG" -f "$DOCKERFILE" . + docker push "$DOCKER_TAG" + + update-ubuntu-focal: + name: Update Ubuntu Focal Image + runs-on: ubuntu-latest + env: + DOCKER_TAG: docker.pkg.github.com/uptane/aktualizr/aktualizr-ci:ubuntu-focal-master + DOCKERFILE: docker/Dockerfile.ubuntu.focal + steps: + - uses: actions/checkout@master + - name: Docker login + run: echo ${{ github.token }} | docker login docker.pkg.github.com -u uptane --password-stdin + - name: Docker build and push + run: | + docker pull "$DOCKER_TAG" || true + docker build --cache-from "$DOCKER_TAG" --pull -t "$DOCKER_TAG" -f "$DOCKERFILE" . 
+ docker push "$DOCKER_TAG" diff --git a/.gitignore b/.gitignore index 7e5f36595b..1a78909484 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,5 @@ __pycache__/ # QT Creator CMakeLists.txt.user /docs/ota-client-guide/modules/ROOT/pages/_junk +*.code-workspace +.vscode/launch.json diff --git a/.gitmodules b/.gitmodules index 528ba7dad6..0556ad6682 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,12 +1,9 @@ [submodule "tests/tuf-test-vectors"] path = tests/tuf-test-vectors - url = https://github.com/advancedtelematic/tuf-test-vectors/ -[submodule "third_party/isotp-c"] - path = third_party/isotp-c - url = https://github.com/advancedtelematic/isotp-c.git -[submodule "third_party/HdrHistogram_c"] - path = third_party/HdrHistogram_c - url = https://github.com/HdrHistogram/HdrHistogram_c + url = https://github.com/advancedtelematic/tuf-test-vectors.git [submodule "third_party/googletest"] path = third_party/googletest url = https://github.com/google/googletest.git +[submodule "third_party/jsoncpp"] + path = third_party/jsoncpp + url = https://github.com/open-source-parsers/jsoncpp diff --git a/.ort.yml b/.ort.yml index 7af4c3d783..184be5594c 100644 --- a/.ort.yml +++ b/.ort.yml @@ -1,14 +1,15 @@ excludes: paths: - - pattern: "partial/extern/**" - reason: "TEST_TOOL_OF" - comment: "This directory contains external dependencies only used by examples which are not distributed." - pattern: "tests/**" reason: "TEST_TOOL_OF" comment: "This directory contains tests which are not distributed." - pattern: "third_party/googletest/**" reason: "TEST_TOOL_OF" - comment: "This directory contains tests which are not distributed." + comment: "This directory contains support code for tests which are not distributed." + - pattern: "third_party/junit/ctest2junit.xsl" + reason: "TEST_TOOL_OF" + comment: "This file contains support code for tests which are not distributed." 
- pattern: "thirdparty.spdx" reason: "TEST_TOOL_OF" comment: "This file contains metadata which are not distributed." + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b72ef8f28e..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -dist: xenial -language: minimal -addons: - apt: - packages: - - "python3" - - "python3-pip" -git: - depth: false -branches: - only: - - master - - /^\d\d\d\d\.\d+(-\w+)?$/ -cache: - directories: - - ccache-bionic - - ccache-xenial - - ccache-debian -env: - matrix: - - DOCKERFILE=docker/Dockerfile.ubuntu.bionic SCRIPT=scripts/test.sh - DARGS="-eTEST_CMAKE_BUILD_TYPE=Valgrind -eTEST_WITH_COVERAGE=1 -eTEST_WITH_P11=1 -eTEST_WITH_DOCKERAPP=1 -eTEST_WITH_FAULT_INJECTION=1 -eTEST_TESTSUITE_EXCLUDE=credentials -eTEST_SOTA_PACKED_CREDENTIALS=dummy-credentials -eCCACHE_DIR=/aktualizr/ccache-bionic" - - DOCKERFILE=docker/Dockerfile.debian.testing SCRIPT=scripts/test.sh - DARGS="-eTEST_CC=clang -eTEST_WITH_LOAD_TESTS=1 -eTEST_WITH_TESTSUITE=0 -eTEST_WITH_STATICTESTS=1 -eTEST_WITH_DOCS=1 -eCCACHE_DIR=/aktualizr/ccache-debian" - - DEPLOY_SRC=1 DEPLOY_PKGS=1 RELEASE_NAME=ubuntu_18.04 DOCKERFILE=docker/Dockerfile.ubuntu.bionic - SCRIPT=scripts/build_ubuntu.sh INSTALL_DOCKERFILE=docker/Dockerfile-test-install.ubuntu.bionic - DARGS="-eTEST_INSTALL_RELEASE_NAME=-ubuntu_18.04 -eCCACHE_DIR=/aktualizr/ccache-bionic" - - DEPLOY_PKGS=1 RELEASE_NAME=ubuntu_16.04 DOCKERFILE=docker/Dockerfile.ubuntu.xenial - SCRIPT=scripts/build_ubuntu.sh INSTALL_DOCKERFILE=docker/Dockerfile-test-install.ubuntu.xenial - DARGS="-eTEST_INSTALL_RELEASE_NAME=-ubuntu_16.04 -eCCACHE_DIR=/aktualizr/ccache-xenial" -services: -- docker -script: -- docker build -t advancedtelematic/aktualizr -f ${DOCKERFILE} . 
-- ci_env=`bash <(curl -s https://codecov.io/env)` -- timeout --foreground 35m docker run -v $TRAVIS_BUILD_DIR:/aktualizr -v /persistent:/persistent -w /aktualizr $ci_env $DARGS -it advancedtelematic/aktualizr ${SCRIPT} -- if [[ $DEPLOY_PKGS = 1 ]]; then ./scripts/test_garage_deploy_deb.sh /persistent "${INSTALL_DOCKERFILE}"; fi diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index a7582bc4b5..0000000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Debug Aktualizr", - "type": "gdb", - "request": "launch", - "target": "./src/aktualizr", - "arguments": "--config ../config/sota_local.toml --loglevel 0", - "cwd": "${workspaceRoot}/build" - }, - { - "type": "gdb", - "request": "attach", - "name": "Attach to valgrind vgdb", - "executable": "./src/aktualizr", - "target": "| vgdb", - "remote": true, - "cwd": "${workspaceRoot}/build" - }, - ] -} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a68f811a8a..258d6b2ef3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,249 @@ This file summarizes notable changes introduced in aktualizr version. It roughly follows the guidelines from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). -Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new release is deemed necessary. Thus it does not exactly map to months of the year. +Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new release is issued. Thus `N` does not necessarily map to months of the year. -## [??? (unreleased)] +## [upcoming release] + +## [2020.10] - 2020-10-27 + +### Added +- Updated the `garage-push` and `garage-deploy` tools. 
Now, they support new back-end token generation to authenticate API requests. Also, we updated the `treehub.json` format for the new back-end. It now has the additional `scope` parameter. The changes are backward compatible. Previous versions have the server URL **without** the token path, so it needs to be hardcoded. The new version has the full URL with the `/oauth2/token` path at the end: [PR](https://github.com/advancedtelematic/aktualizr/pull/1767) + +### Changed +- Ubuntu Focal Dockerfile now uses the default OSTree package: [PR](https://github.com/advancedtelematic/aktualizr/pull/1751) +- Improved libaktualizr API exceptions: [PR](https://github.com/advancedtelematic/aktualizr/pull/1754) +- Improved binary file download progress: [PR](https://github.com/advancedtelematic/aktualizr/pull/1756) +- Allowed passing HTTP headers in `aktualizr-get`: [PR](https://github.com/advancedtelematic/aktualizr/pull/1762) +- Moved aktualizr-lite to its own [aktualizr-lite repository](https://github.com/foundriesio/aktualizr-lite): [PR](https://github.com/advancedtelematic/aktualizr/pull/1763) + +### Fixed +- Fixed the issue with the parameters check in `aktualizr-get`: [PR](https://github.com/advancedtelematic/aktualizr/pull/1760) +- Fixed the output of the pacman configuration: [PR](https://github.com/advancedtelematic/aktualizr/pull/1761) + +## [2020.9] - 2020-08-26 + +### Added + +- Exceptions thrown through the API are now [documented](include/libaktualizr/aktualizr.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1737) +- The client TLS certificate and key can be re-imported from the filesystem as long as the device ID is unchanged: [PR](https://github.com/advancedtelematic/aktualizr/pull/1743) + +### Changed + +- More required headers for libaktualizr usage have been refactored for easier use: [PR](https://github.com/advancedtelematic/aktualizr/pull/1719) +- All code is now checked with clang-tidy-10: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1724) +- Default/recommended Yocto branch is dunfell (3.1): [PR](https://github.com/advancedtelematic/aktualizr/pull/1740) + +### Removed + +- The Debian package manager has been removed as it was never fully functional: [PR](https://github.com/advancedtelematic/aktualizr/pull/1739) +- Android support has been removed as it was an unfinished prototype: [PR](https://github.com/advancedtelematic/aktualizr/pull/1732) +- The ISO-TP Secondary has been removed as it was an unmaintained prototype: [PR](https://github.com/advancedtelematic/aktualizr/pull/1732) + + +## [2020.8] - 2020-07-09 + +### Special considerations + +As a result of changes to the IP/POSIX Secondary protocol (see below), users of these Secondaries will need to take special care when upgrading their devices. The new version of aktualizr is backwards compatible and will work with both old and new versions of the protocol. However, aktualizr-secondary is *not*. This means that if you are upgrading a device with IP/POSIX Secondaries, you should update the Primary ECU running aktualizr **first**, and if that is successful, then update your Secondaries. 
+ +### Added + +- You can now use the `SetInstallationRawReport` API function to set a custom raw report field in the device installation result: [PR](https://github.com/advancedtelematic/aktualizr/pull/1628) +- You can now re-register ECUs, which supports replacing the Primary and adding, removing, and replacing Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1686) +- gcc version 9 is now supported: [PR](https://github.com/advancedtelematic/aktualizr/pull/1714) + +### Changed + +- Improved the Secondary interface and error reporting: [PR](https://github.com/advancedtelematic/aktualizr/pull/1642) +- Improved the Secondary IP/POSIX communication protocol, including streaming binary updates from the Primary to the Secondary: [PR](https://github.com/advancedtelematic/aktualizr/pull/1642) +- Moved the binary update logic to the package manager (and added `images_path` to the configuration): [PR](https://github.com/advancedtelematic/aktualizr/pull/1679) +- The shared provisioning p12 file is now removed from the credentials archive after use. [This can be disabled for testing.](https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc) [PR](https://github.com/advancedtelematic/aktualizr/pull/1697) +- Errors encountered while sending metadata to Secondaries are now reported to the server with greater detail: [PR](https://github.com/advancedtelematic/aktualizr/pull/1703) +- The headers required to include for API users have been simplified: [PR #1707](https://github.com/advancedtelematic/aktualizr/pull/1707), [PR #1713](https://github.com/advancedtelematic/aktualizr/pull/1713), and [PR #1716](https://github.com/advancedtelematic/aktualizr/pull/1716) + + +## [2020.7] - 2020-05-29 + +### Changed + +- Cache device data (network, hardware info...) 
as much as we can to save bandwidth: [PR](https://github.com/advancedtelematic/aktualizr/pull/1673) +- Stricter matching of Uptane metadata with installed images: [PR](https://github.com/advancedtelematic/aktualizr/pull/1666) + +### Fixed + +- Various docker-app fixes: [PR #1664](https://github.com/advancedtelematic/aktualizr/pull/1664) and [PR #1665](https://github.com/advancedtelematic/aktualizr/pull/1665) +- Use ED25519 to sign manifests when set as key type: [PR](https://github.com/advancedtelematic/aktualizr/pull/1608) + +## [2020.6] - 2020-04-30 + +### Added + +- libaktualizr API and aktualizr-primary command line parameter to provide custom hardware information in JSON format: [PR](https://github.com/advancedtelematic/aktualizr/pull/1644) + +### Changed + +- Improved garage-deploy object fetching performance by reusing the curl handle: [PR](https://github.com/advancedtelematic/aktualizr/pull/1643) +- Added an SQL busy handler with 2 seconds timeout: [PR](https://github.com/advancedtelematic/aktualizr/pull/1648) +- Improved internal exception handling: [PR #1654](https://github.com/advancedtelematic/aktualizr/pull/1654) and [PR #1658](https://github.com/advancedtelematic/aktualizr/pull/1658) + +### Fixed + +- Prevented more failure states from resulting in an installation loop: [PR #1632](https://github.com/advancedtelematic/aktualizr/pull/1632) and [PR #1635](https://github.com/advancedtelematic/aktualizr/pull/1635) +- Allow installation of 0-byte binary files: [PR](https://github.com/advancedtelematic/aktualizr/pull/1652) +- Refuse to download OSTree targets with the fake/binary package manager: [PR](https://github.com/advancedtelematic/aktualizr/pull/1653) + +### Removed + +- No longer fetch unnumbered Root metadata from the Director: [PR](https://github.com/advancedtelematic/aktualizr/pull/1661) + + +## [2020.5] - 2020-04-01 + +### Changed + +- Fetch garage-sign from new AWS bucket via CNAME: [PR 
#1619](https://github.com/advancedtelematic/aktualizr/pull/1619) and [PR #1622](https://github.com/advancedtelematic/aktualizr/pull/1622) + +### Fixed + +- Abort update immediately if Secondary metadata verification fails: [PR](https://github.com/advancedtelematic/aktualizr/pull/1612) + + +## [2020.4] - 2020-03-24 + +### Added + +- aktualizr-secondary can now reboot automatically after triggering an update: [PR](https://github.com/advancedtelematic/aktualizr/pull/1578) +- Reports are now stored in the SQL database so they persist through unexpected shutdown: [PR](https://github.com/advancedtelematic/aktualizr/pull/1559) + +### Changed + +- garage-push now always pushes the OSTree ref to Treehub: [PR](https://github.com/advancedtelematic/aktualizr/pull/1575) +- Consistently follow the [Uptane standard's style guide](https://github.com/uptane/uptane-standard#style-guide) when using Uptane concepts, including the metadata output options of aktualizr-info: [PR](https://github.com/advancedtelematic/aktualizr/pull/1591) +- Public contributions now are tested with Github Actions instead of Travis CI: [PR](https://github.com/advancedtelematic/aktualizr/pull/1597) +- Default/recommended Yocto branch is zeus (3.0): [PR](https://github.com/advancedtelematic/aktualizr/pull/1603) +- Improved logging for aktualizr-secondary: [PR](https://github.com/advancedtelematic/aktualizr/pull/1609) + +### Fixed + +- Abort initialization if ECUs are already registered: [PR](https://github.com/advancedtelematic/aktualizr/pull/1579) +- Always use 64-bit integers for disk space arithmetic: [PR](https://github.com/advancedtelematic/aktualizr/pull/1588) +- Reject Director Targets metadata with delegations or repeated ECU IDs: [PR](https://github.com/advancedtelematic/aktualizr/pull/1600) + + +## [2020.3] - 2020-02-27 + +### Added + +- Pluggable package managers for the Primary: [PR](https://github.com/advancedtelematic/aktualizr/pull/1518) +- Log basic device information when starting aktualizr: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1555) + +### Changed + +- Wait for Secondaries to come online before attempting installation: [PR #1533](https://github.com/advancedtelematic/aktualizr/pull/1533) and [PR #1562](https://github.com/advancedtelematic/aktualizr/pull/1562) +- Renamed shared libraries to remove the extraneous "\_lib": [PR](https://github.com/advancedtelematic/aktualizr/pull/1564) + +### Fixed + +- Apply pending updates even if their metadata expired if the installation was initiated before the expiration: [PR](https://github.com/advancedtelematic/aktualizr/pull/1548) +- Add a missing include to fix building libaktualizr out-of-tree: [PR](https://github.com/advancedtelematic/aktualizr/pull/1572) +- Restore interrupted downloads correctly: [PR](https://github.com/advancedtelematic/aktualizr/pull/1571) +- Use `uintmax_t` for storing file length to support files greater than 4 GB: [PR](https://github.com/advancedtelematic/aktualizr/pull/1571) + + +## [2020.2] - 2020-01-30 + +### Changed + +- Require OpenSSL >= 1.0.2 explicitly: [PR](https://github.com/advancedtelematic/aktualizr/pull/1487) + +### Fixed + +- Catch the disk space availability exception: [PR](https://github.com/advancedtelematic/aktualizr/pull/1530) +- Correct Secondary target name/filepath in a manifest: [PR](https://github.com/advancedtelematic/aktualizr/pull/1529) + + +## [2020.1] - 2020-01-17 + +### Added + +- Basic file update on IP Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1518) + +### Changed + +- Increased Targets metadata file size limit: [PR](https://github.com/advancedtelematic/aktualizr/pull/1476) +- Check and fetch Root metadata according to the Uptane standard: [PR](https://github.com/advancedtelematic/aktualizr/pull/1501) +- Don't fetch Snapshot or Targets metadata if we already have the latest: [PR](https://github.com/advancedtelematic/aktualizr/pull/1503) +- Dynamically link aktualizr and the tests with libaktualizr as shared 
library: [PR](https://github.com/advancedtelematic/aktualizr/pull/1512) +- Reject all targets if one doesn't match: [PR](https://github.com/advancedtelematic/aktualizr/pull/1510) + +### Fixed + +- Do not provision if the Primary times out while connecting to Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1491) +- Use a bool type instead of a string in the virtual Secondary config: [PR](https://github.com/advancedtelematic/aktualizr/pull/1505) +- Correctly read blob data with null terminators from the SQL database: [PR](https://github.com/advancedtelematic/aktualizr/pull/1502) +- Report installation failure if download or target matching fails: [PR](https://github.com/advancedtelematic/aktualizr/pull/1510) +- Disk space is now checked before downloading binary files to ensure sufficient available disk space: [PR](https://github.com/advancedtelematic/aktualizr/pull/1520) +- Fixed several issues with OSTree updates on IP Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1518) + + +## [2019.11] - 2019-12-12 + +### Added + +- Allow logger to use stderr: [PR](https://github.com/advancedtelematic/aktualizr/pull/1457) +- Full metadata verification on IP Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1449) +- Log when connectivity is restored after an interruption: [PR](https://github.com/advancedtelematic/aktualizr/pull/1463) +- Aktualizr now sends its configuration to the backend at boot, for audit purposes: [PR](https://github.com/advancedtelematic/aktualizr/pull/1474) + +### Changed + +- The jsoncpp library is now included as a submodule and was updated to v1.8.4: [PR](https://github.com/advancedtelematic/aktualizr/pull/1462) +- PKCS11 engine paths auto-detection is not done at runtime anymore, but at configure time when possible: [PR](https://github.com/advancedtelematic/aktualizr/pull/1471) + +### Fixed + +- Removed bogus warning at boot when using OSTree: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1466) +- Updated the docker-app package manager to work with docker-app v0.8: [PR](https://github.com/advancedtelematic/aktualizr/pull/1468) +- Overriding of log level when using the docker-app package manager: [PR](https://github.com/advancedtelematic/aktualizr/pull/1478) +- Report correct hash of the currently installed version on IP Secondary: [PR](https://github.com/advancedtelematic/aktualizr/pull/1485) + +## [2019.10] - 2019-11-15 + +### Added + +- Option to send Android repo manifest via garage-push: [PR](https://github.com/advancedtelematic/aktualizr/pull/1440) +- Expanded [C API](https://github.com/advancedtelematic/aktualizr/blob/master/include/libaktualizr-c.h): [PR #1387](https://github.com/advancedtelematic/aktualizr/pull/1387) and [PR #1429](https://github.com/advancedtelematic/aktualizr/pull/1429) + +### Changed + +- Hardware information is only sent if it has changed: [PR](https://github.com/advancedtelematic/aktualizr/pull/1434) +- Builds without OSTree now default to using the binary package manager: [PR](https://github.com/advancedtelematic/aktualizr/pull/1432) +- New endpoint for reporting hardware information: [PR](https://github.com/advancedtelematic/aktualizr/pull/1421) + +### Removed + +- libsystemd dependency and socket activation support: [PR](https://github.com/advancedtelematic/aktualizr/pull/1437) + +### Fixed + +- Enforce a limit of 10 HTTP redirects: [PR](https://github.com/advancedtelematic/aktualizr/pull/1420) +- Reject malformed root.json: [PR](https://github.com/advancedtelematic/aktualizr/pull/1417) +- Fall back on full file download if byte range requests are not supported: [PR](https://github.com/advancedtelematic/aktualizr/pull/1416) + + +## [2019.9] - 2019-10-16 + +### Added + +- Handle POSIX signals: [PR](https://github.com/advancedtelematic/aktualizr/pull/1384) +- Store target custom metadata when installing: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1370) + +### Fixed + +- Incorrect installation status reported if installation interrupted: [PR](https://github.com/advancedtelematic/aktualizr/pull/1402) +- Binary updates of Secondaries from an OSTree Primary is again possible: [PR](https://github.com/advancedtelematic/aktualizr/pull/1395) +- Applications built from release tarballs now report a valid version: [PR](https://github.com/advancedtelematic/aktualizr/pull/1415) ## [2019.8] - 2019-09-12 @@ -35,7 +275,7 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas - Uptane metadata is now rechecked (offline) before downloading and installing: [PR](https://github.com/advancedtelematic/aktualizr/pull/1296) - Downloaded target hashes are rechecked before installation: [PR](https://github.com/advancedtelematic/aktualizr/pull/1296) - Failed downloads are now reported to the backend in the installation report: [PR](https://github.com/advancedtelematic/aktualizr/pull/1301) -- Binary targets for an OSTree-based primary are now rejected immediately: [PR](https://github.com/advancedtelematic/aktualizr/pull/1282) +- Binary targets for an OSTree-based Primary are now rejected immediately: [PR](https://github.com/advancedtelematic/aktualizr/pull/1282) ## [2019.6] - 2019-08-21 @@ -44,12 +284,11 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas - garage-sign metadata expiration parameters: [PR](https://github.com/advancedtelematic/ota-tuf/pull/237) - aktualizr-info --wait-until-provisioned flag: [PR](https://github.com/advancedtelematic/aktualizr/pull/1253) -- Target object equality requires that hardware IDs match: [PR](https://github.com/advancedtelematic/aktualizr/pull/1258) - aktualizr-repo image command now requires a hardware ID: [PR](https://github.com/advancedtelematic/aktualizr/pull/1258) - `GetStoredTargets` and `DeleteStoredTarget` aktualizr API methods: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1290) -- [aktualizr-get](https://github.com/advancedtelematic/aktualizr/blob/master/src/aktualizr_get/main.cc) debugging tool: [PR](https://github.com/advancedtelematic/aktualizr/pull/1276) +- [aktualizr-get](src/aktualizr_get/main.cc) debugging tool: [PR](https://github.com/advancedtelematic/aktualizr/pull/1276) - Automatic reboot command is now customizable: [PR](https://github.com/advancedtelematic/aktualizr/pull/1274) -- Basic [C API](https://github.com/advancedtelematic/aktualizr/blob/master/include/libaktualizr-c.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1263) +- Basic [C API](include/libaktualizr-c.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1263) - Ability to pass custom headers in HTTP requests: [PR](https://github.com/advancedtelematic/aktualizr/pull/1251) - Mutual TLS support in garage tools: [PR #1243](https://github.com/advancedtelematic/aktualizr/pull/1243) and [PR #1288](https://github.com/advancedtelematic/aktualizr/pull/1288) @@ -69,7 +308,7 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas - TLS support by aktualizr-lite: [PR](https://github.com/advancedtelematic/aktualizr/pull/1237) - automatic garage-check usage at the end of garage-push/deploy: [PR](https://github.com/advancedtelematic/aktualizr/pull/1244) -- ccache support: [PR] (https://github.com/advancedtelematic/aktualizr/pull/1248, https://github.com/advancedtelematic/aktualizr/pull/1249) +- ccache support: [PR #1248](https://github.com/advancedtelematic/aktualizr/pull/1248) and [PR #1249](https://github.com/advancedtelematic/aktualizr/pull/1249) - doc on Primary and Secondary bitbaking for RPi: [PR](https://github.com/advancedtelematic/aktualizr/pull/1238) ### Changed @@ -80,14 +319,14 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Removed - Jenkins pipeline and a few references: 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1236) -- Test repo meta: [PR](https://github.com/advancedtelematic/aktualizr/pull/1239) +- Hardcoded repo metadata used for testing: [PR](https://github.com/advancedtelematic/aktualizr/pull/1239) - SecondaryFactory and VirtualSecondary out of libaktualizr: [PR](https://github.com/advancedtelematic/aktualizr/pull/1241) - Fallback on clang-{tidy,format}: [PR](https://github.com/advancedtelematic/aktualizr/pull/1240) ### Fixed - Logic of finding the latest version by aktualizr-lite: [PR](https://github.com/advancedtelematic/aktualizr/pull/1247) -- Test regression in docker-app-mgr: [PR] (https://github.com/advancedtelematic/aktualizr/pull/1250) +- Test regression in docker-app-mgr: [PR](https://github.com/advancedtelematic/aktualizr/pull/1250) - Some more lintian fixes: [PR](https://github.com/advancedtelematic/aktualizr/pull/1242) @@ -97,16 +336,16 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas - Campaigns can be declined and postponed via the API: [PR](https://github.com/advancedtelematic/aktualizr/pull/1225) - Warn when running two libaktualizr instances simultaneously: [PR #1217](https://github.com/advancedtelematic/aktualizr/pull/1217) and [PR #1229](https://github.com/advancedtelematic/aktualizr/pull/1229) -- aktualizr-info can output the Snapshot and Timestamp metadata from the Images repository: [PR](https://github.com/advancedtelematic/aktualizr/pull/1207) -- aktualizr-info can output the current and pending image versions for secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1201) +- aktualizr-info can output the Snapshot and Timestamp metadata from the Image repository: [PR](https://github.com/advancedtelematic/aktualizr/pull/1207) +- aktualizr-info can output the current and pending image versions for Secondaries: [PR](https://github.com/advancedtelematic/aktualizr/pull/1201) - [Support for docker-app package management on top of 
OSTree](src/libaktualizr/package_manager/dockerappmanager.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1189) ### Changed -- [Provisioning methods have been renamed](https://github.com/advancedtelematic/aktualizr/blob/master/docs/client-provisioning-methods.adoc). "Autoprovisioning" or "automatic provisioning" is now known as "shared credential provisioning". "Implicit provisioning" is now known as "device credential provisioning". "HSM provisioning" was always a misnomer, so it is now refered to as "device credential provisioning with an HSM". [PR# 1208](https://github.com/advancedtelematic/aktualizr/pull/1208) and [PR #1220](https://github.com/advancedtelematic/aktualizr/pull/1220) +- [Provisioning methods have been renamed](https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc). "Autoprovisioning" or "automatic provisioning" is now known as "shared credential provisioning". "Implicit provisioning" is now known as "device credential provisioning". "HSM provisioning" was always a misnomer, so it is now referred to as "device credential provisioning with an HSM". 
[PR #1208](https://github.com/advancedtelematic/aktualizr/pull/1208) and [PR #1220](https://github.com/advancedtelematic/aktualizr/pull/1220) - aktualizr-cert-provider is now included in the garage_deploy.deb releases: [PR](https://github.com/advancedtelematic/aktualizr/pull/1218) - aktualizr-info metadata and key output is now printed without additional text for easier machine parsing (and piping to jq): [PR](https://github.com/advancedtelematic/aktualizr/pull/1215) -- The IP secondary implementation has been substantially refactored and improved with support for POSIX sockets and easier configuration: [PR #1183](https://github.com/advancedtelematic/aktualizr/pull/1183) and [PR #1198](https://github.com/advancedtelematic/aktualizr/pull/1198) +- The IP Secondary implementation has been substantially refactored and improved with support for POSIX sockets and easier configuration: [PR #1183](https://github.com/advancedtelematic/aktualizr/pull/1183) and [PR #1198](https://github.com/advancedtelematic/aktualizr/pull/1198) ### Removed @@ -118,10 +357,10 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Added - New tool aktualizr-lite for anonymous TUF-only updates: [PR](https://github.com/advancedtelematic/aktualizr/pull/1107) -- [Abort() API call](src/libaktualizr/primary/aktualizr.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1122) +- [Abort() API call](include/libaktualizr/aktualizr.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1122) - [Option to print delegation metadata with aktualizr-info](src/aktualizr_info/main.cc): [PR](https://github.com/advancedtelematic/aktualizr/pull/1138) - Support for custom URIs for downloading targets: [PR](https://github.com/advancedtelematic/aktualizr/pull/1147) -- [SendManifest() API call](src/libaktualizr/primary/aktualizr.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1176) +- [SendManifest() API call](include/libaktualizr/aktualizr.h): 
[PR](https://github.com/advancedtelematic/aktualizr/pull/1176) - [Support for Android package management](src/libaktualizr/package_manager/androidmanager.h): [PR](https://github.com/advancedtelematic/aktualizr/pull/1034) ### Changed @@ -133,12 +372,12 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Removed - example.com is no longer set as the default URL when using garage-sign via garage-deploy: [PR](https://github.com/advancedtelematic/aktualizr/pull/1169) -- OPC-UA secondary support: [PR](https://github.com/advancedtelematic/aktualizr/pull/1177) +- OPC-UA Secondary support: [PR](https://github.com/advancedtelematic/aktualizr/pull/1177) ### Fixed - Check for updates even if sending the manifest fails: [PR](https://github.com/advancedtelematic/aktualizr/pull/1186) -- Correctly handle empty targets metadata: [PR #1186](https://github.com/advancedtelematic/aktualizr/pull/1186) and [PR #1192](https://github.com/advancedtelematic/aktualizr/pull/1192) +- Correctly handle empty Targets metadata: [PR #1186](https://github.com/advancedtelematic/aktualizr/pull/1186) and [PR #1192](https://github.com/advancedtelematic/aktualizr/pull/1192) - Various OSTree-related memory leaks and suppressions: [PR #1114](https://github.com/advancedtelematic/aktualizr/pull/1114), [PR #1120](https://github.com/advancedtelematic/aktualizr/pull/1120), and [PR #1179](https://github.com/advancedtelematic/aktualizr/pull/1179) - Various spurious and/or confusing log messages, e.g.: [PR #1112](https://github.com/advancedtelematic/aktualizr/pull/1112), [PR #1137](https://github.com/advancedtelematic/aktualizr/pull/1137), and [PR #1180](https://github.com/advancedtelematic/aktualizr/pull/1180) @@ -147,7 +386,7 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Added -- A new uptane configuration parameter `force_install_completion` that triggers a system reboot at the end of the installation process for update mechanisms that 
need one to complete (e.g. OSTree package manager) +- A new configuration parameter `force_install_completion` that triggers a system reboot at the end of the installation process for update mechanisms that need one to complete (e.g. OSTree package manager) - Support for delegations: [PR #1074](https://github.com/advancedtelematic/aktualizr/pull/1074) and [PR #1089](https://github.com/advancedtelematic/aktualizr/pull/1089) - Backward migrations of the SQL storage is now supported. It should allow rollbacking updates up to versions containing the feature: [PR](https://github.com/advancedtelematic/aktualizr/pull/1072) @@ -162,8 +401,8 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Changed -- [Most API calls refactored to return immediately with a future](src/libaktualizr/primary/aktualizr.h) -- With an OStree primary, an installation is now considered successful when the device reboots with the new file system version. Before that, the installation is still considered in progress. +- [Most API calls refactored to return immediately with a future](include/libaktualizr/aktualizr.h) +- With an OSTree Primary, an installation is now considered successful when the device reboots with the new file system version. Before that, the installation is still considered in progress. 
- [Running modes in libaktualizr have been replaced by simpler logic in the aktualizr wrapper](src/aktualizr_primary/main.cc): [PR](https://github.com/advancedtelematic/aktualizr/pull/1039) - Tests now use ed25519 as the default key type: [PR](https://github.com/advancedtelematic/aktualizr/pull/1038) - Improved performance of garage-deploy: [PR](https://github.com/advancedtelematic/aktualizr/pull/1020) @@ -188,12 +427,12 @@ Our versioning scheme is `YEAR.N` where `N` is incremented whenever a new releas ### Added -- [Ability to pause and resume binary update downloads](src/libaktualizr/primary/aktualizr.h) +- [Ability to pause and resume binary update downloads](include/libaktualizr/aktualizr.h) - Expose download binary targets in API ### Changed -- Secondaries configuration files must now lie in a common directory and specified in command line arguments or in static configuration: [documentation](docs/configuration.adoc#uptane) +- Secondaries configuration files must now lie in a common directory and be specified in command line arguments or in static configuration: [documentation](docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc#uptane) - API has been upgraded: FetchMeta has been merged with CheckUpdates and most functions now have meaningful return values. 
### Removed diff --git a/CMakeLists.txt b/CMakeLists.txt index 54b44701f3..876052be12 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,12 +12,6 @@ if(NOT CMAKE_BUILD_TYPE) message(STATUS "No CMAKE_BUILD_TYPE specified, defaulting to ${CMAKE_BUILD_TYPE}") endif(NOT CMAKE_BUILD_TYPE) -if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") - set(BUILD_SYSTEMD_DEFAULT ON) -else() - set(BUILD_SYSTEMD_DEFAULT OFF) -endif() - if(CMAKE_BUILD_TYPE MATCHES "Valgrind") set(TESTSUITE_VALGRIND_DEFAULT ON) else() @@ -27,26 +21,22 @@ endif() option(WARNING_AS_ERROR "Treat warnings as errors" ON) option(PEDANTIC_WARNINGS "Compile with pedantic warnings" OFF) option(BUILD_WITH_CODE_COVERAGE "Enable gcov code coverage" OFF) -option(BUILD_OSTREE "Set to ON to compile with ostree support" OFF) +option(BUILD_OSTREE "Set to ON to compile with OSTree support" OFF) option(BUILD_DEB "Set to ON to compile with debian packages support" OFF) -option(BUILD_DOCKERAPP "Set to ON to compile with package manager support of docker-app" OFF) option(BUILD_P11 "Support for key storage in a HSM via PKCS#11" OFF) option(BUILD_SOTA_TOOLS "Set to ON to build SOTA tools" OFF) -option(BUILD_ISOTP "Set to ON to compile with ISO/TP protocol support" OFF) -option(BUILD_SYSTEMD "Set to ON to compile with systemd additional support" ${BUILD_SYSTEMD_DEFAULT}) -option(BUILD_LOAD_TESTS "Set to ON to build load tests" OFF) option(FAULT_INJECTION "Set to ON to enable fault injection" OFF) -option(INSTALL_LIB "Set to ON to install library and headers" OFF) option(TESTSUITE_VALGRIND "Set to ON to make tests to run under valgrind (default when CMAKE_BUILD_TYPE=Valgrind)" ${TESTSUITE_VALGRIND_DEFAULT}) option(CCACHE "Set to ON to use ccache if available" ON) +# Adhere to GNU filesystem layout conventions; provides CMAKE_INSTALL_* macros +include(GNUInstallDirs) + set(SOTA_PACKED_CREDENTIALS "" CACHE STRING "Credentials.zip for tests involving the server") set(TESTSUITE_ONLY "" CACHE STRING "Only run tests matching this list 
of labels") set(TESTSUITE_EXCLUDE "" CACHE STRING "Exclude tests matching this list of labels") -set(STORAGE_TYPE "sqlite" CACHE STRING "") - if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") message(FATAL_ERROR "Aktualizr does not support building in the source tree. Please remove CMakeCache.txt and the CMakeFiles/ directory, then create a subdirectory to build in: mkdir build; cd build; cmake ..") endif() @@ -58,6 +48,10 @@ unset(AKTUALIZR_CHECKED_SRCS CACHE) set(CMAKE_POSITION_INDEPENDENT_CODE ON) +# To ensure better support of large files on 32-bit systems. +# See https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html +add_definitions("-D_FILE_OFFSET_BITS=64") + if (CCACHE) find_program(CCACHE_PROGRAM ccache) if (CCACHE_PROGRAM) @@ -70,89 +64,90 @@ set(BOOST_COMPONENTS log_setup log system filesystem program_options) set(Boost_USE_STATIC_LIBS OFF) add_definitions(-DBOOST_LOG_DYN_LINK) -if(ANDROID) - set(BOOST_ROOT ${PREBUILD_BOOST_ROOT}) - set(Boost_INCLUDE_DIR ${BOOST_ROOT}/include) - set(Boost_LIBRARY_DIR ${BOOST_ROOT}/libs/llvm/${ANDROID_ABI}) - if(Boost_FOUND) - include_directories(${Boost_INCLUDE_DIRS}) - endif() -endif(ANDROID) - # Mac brew library install paths - -if(EXISTS /usr/local/opt/openssl) - list(APPEND CMAKE_PREFIX_PATH /usr/local/opt/openssl) +if(EXISTS /opt/homebrew/opt/openssl@1.1) + list(APPEND CMAKE_PREFIX_PATH /opt/homebrew/opt/openssl@1.1) endif() -if(EXISTS /usr/local/opt/libarchive) - list(APPEND CMAKE_PREFIX_PATH /usr/local/opt/libarchive) +if(EXISTS /opt/homebrew/opt/libarchive/include) + list(APPEND CMAKE_PREFIX_PATH /opt/homebrew/opt/libarchive/include) endif() -find_package(PkgConfig REQUIRED) -find_package(Boost 1.57.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) +find_package(Boost 1.58.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) find_package(CURL REQUIRED) -find_package(OpenSSL REQUIRED) +find_package(OpenSSL 1.0.2 REQUIRED) find_package(Threads REQUIRED) find_package(LibArchive REQUIRED) 
find_package(sodium REQUIRED) find_package(SQLite3 REQUIRED) find_package(Git) find_package(Asn1c REQUIRED) -find_package(ZLIB) if(NOT AKTUALIZR_VERSION) - if(GIT_EXECUTABLE) - execute_process(COMMAND sh -c "${GIT_EXECUTABLE} -C ${PROJECT_SOURCE_DIR} describe | tr -d '\n'" OUTPUT_VARIABLE AKTUALIZR_VERSION) - message(STATUS "Setting version to ${AKTUALIZR_VERSION}") - else(GIT_EXECUTABLE) - message(WARNING "Version is not set and git is not available, set version to an arbitrary string") - set(AKTUALIZR_VERSION "0.0-dev") - endif(GIT_EXECUTABLE) + if (EXISTS ${PROJECT_SOURCE_DIR}/VERSION) + file(READ ${PROJECT_SOURCE_DIR}/VERSION AKTUALIZR_VERSION) + string(STRIP "${AKTUALIZR_VERSION}" AKTUALIZR_VERSION) + elseif (GIT_EXECUTABLE) + execute_process(COMMAND ${PROJECT_SOURCE_DIR}/scripts/get_version.sh ${GIT_EXECUTABLE} ${PROJECT_SOURCE_DIR} OUTPUT_VARIABLE AKTUALIZR_VERSION RESULT_VARIABLE RES) + if (NOT RES EQUAL 0) + message(FATAL_ERROR "Could not get current version from git") + endif () + endif () + if (NOT AKTUALIZR_VERSION) + message(FATAL_ERROR "Version was not detected properly, verify that you have either a git checkout or a VERSION file present in aktualizr's main directory") + endif () + message(STATUS "Setting version to ${AKTUALIZR_VERSION}") endif(NOT AKTUALIZR_VERSION) if(BUILD_OSTREE) find_package(OSTree REQUIRED) add_definitions(-DBUILD_OSTREE) else(BUILD_OSTREE) - unset(LIBOSTREE_LIBRARIES CACHE) + # The sota tools depend on libostree so no reason to unset LIBOSTREE_LIBRARIES if they are enabled + if (NOT BUILD_SOTA_TOOLS) + unset(LIBOSTREE_LIBRARIES CACHE) + endif(NOT BUILD_SOTA_TOOLS) endif(BUILD_OSTREE) -if(BUILD_DOCKERAPP) - add_definitions(-DBUILD_DOCKERAPP) -endif(BUILD_DOCKERAPP) - if(BUILD_P11) find_package(LibP11 REQUIRED) add_definitions(-DBUILD_P11) -endif(BUILD_P11) -# Setup PKCS11 -if(TEST_PKCS11_MODULE_PATH) - add_definitions(-DTEST_PKCS11_MODULE_PATH="${TEST_PKCS11_MODULE_PATH}" - 
-DTEST_PKCS11_ENGINE_PATH="${TEST_PKCS11_ENGINE_PATH}") -endif(TEST_PKCS11_MODULE_PATH) + if (NOT PKCS11_ENGINE_PATH) + if (CMAKE_CROSSCOMPILING) + message(FATAL_ERROR "Please define the PKCS11_ENGINE_PATH on your target system") + endif() + foreach(engine IN ITEMS "/usr/lib/engines-1.1/pkcs11.so" + "/usr/lib/engines/pkcs11.so" + "/usr/lib/x86_64-linux-gnu/engines-1.1/pkcs11.so" + "/usr/lib/arm-linux-gnueabihf/engines-1.1/pkcs11.so") + + if(EXISTS ${engine}) + message(STATUS "Detected pkcs11 engine path: ${engine}") + set(PKCS11_ENGINE_PATH ${engine}) + endif() + endforeach() + + if (NOT PKCS11_ENGINE_PATH) + message(FATAL_ERROR "Could not auto-detect path of PKCS11 engine, please specify PKCS11_ENGINE_PATH") + endif() + + set(PKCS11_ENGINE_PATH "${PKCS11_ENGINE_PATH}" CACHE STRING "Path to PKCS#11 engine library") + endif() +endif(BUILD_P11) if(BUILD_SOTA_TOOLS) find_package(GLIB2 REQUIRED) find_program(STRACE NAMES strace) + # The sota tools depend on libostree, but they don't require/depend on software enabled by BUILD_OSTREE flag + find_package(OSTree REQUIRED) endif(BUILD_SOTA_TOOLS) -if(BUILD_ISOTP) - add_definitions(-DISOTP_SECONDARY_ENABLED) -endif(BUILD_ISOTP) - -if(BUILD_SYSTEMD) - find_package(Systemd REQUIRED) -else(BUILD_SYSTEMD) - unset(SYSTEMD_LIBRARY CACHE) -endif(BUILD_SYSTEMD) - if(FAULT_INJECTION) find_package(Libfiu REQUIRED) add_definitions(-DFIU_ENABLE) link_libraries(fiu dl) - install(PROGRAMS scripts/fiu DESTINATION bin COMPONENT aktualizr) + install(PROGRAMS scripts/fiu DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT aktualizr) endif(FAULT_INJECTION) # flags for different build types @@ -186,12 +181,6 @@ if (CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)") add_definitions(-Wshadow) endif () - if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_SYSTEM_NAME MATCHES "Android") - # treat boost headers as system to ignore warnings, - # this is alternative to in-code diagnostics disabling - add_definitions(--system-header-prefix=boost/) - endif () - if 
(WARNING_AS_ERROR) add_definitions(-Werror) endif () @@ -228,9 +217,8 @@ if(TESTSUITE_VALGRIND) endif() add_custom_target(build_tests) -# clang-check and clang-format -find_program(CLANG_FORMAT NAMES clang-format-6.0) -find_program(CLANG_TIDY NAMES clang-tidy-6.0) +find_program(CLANG_FORMAT NAMES clang-format-11) +find_program(CLANG_TIDY NAMES clang-tidy-12 clang-tidy-11 clang-tidy-10) if(CLANG_FORMAT) function(aktualizr_clang_format) @@ -253,7 +241,7 @@ if(CLANG_FORMAT) endforeach() endfunction() else() - message(WARNING "clang-format-6.0 not found, skipping") + message(WARNING "clang-format-11 not found, skipping") function(aktualizr_clang_format) endfunction() endif() @@ -264,22 +252,16 @@ if(CLANG_TIDY) function(aktualizr_clang_tidy) file(RELATIVE_PATH SUBDIR ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) foreach(FILE ${ARGN}) - if(${FILE} MATCHES "\\.h") - # do not run clang-tidy directly on header files, since it - # ignores them. headers will be checked if they are included by - # a checked source file - continue() - endif() string(REPLACE "/" "_" TARGETNAME "aktualizr_clang_tidy-${SUBDIR}-${FILE}") add_custom_target(${TARGETNAME} - COMMAND ${CLANG_TIDY} -quiet -header-filter=\(${CMAKE_SOURCE_DIR}|\\.\\.\)/src/.* --extra-arg-before=-Wno-unknown-warning-option -format-style=file -p ${CMAKE_BINARY_DIR} ${FILE} + COMMAND ${PROJECT_SOURCE_DIR}/scripts/clang-tidy-wrapper.sh ${CLANG_TIDY} ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR} ${FILE} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} VERBATIM) add_dependencies(clang-tidy ${TARGETNAME}) endforeach() endfunction() else() - message(WARNING "clang-tidy-6.0 not found, skipping") + message(WARNING "Unable to find clang-tidy-12, clang-tidy-11, or clang-tidy-10; skipping") function(aktualizr_clang_tidy) endfunction() endif() @@ -308,6 +290,17 @@ function(aktualizr_source_file_checks) endif() endfunction() +find_program(SHELLCHECK NAMES shellcheck) +if(SHELLCHECK) + add_custom_target(shellcheck + COMMAND find docs/ scripts/ src/ tests/ 
-name "*.sh" | xargs ${SHELLCHECK} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + VERBATIM) + + add_dependencies(qa shellcheck) +else() + message(WARNING "shellcheck not found, skipping") +endif() # Use C++11, but without GNU or other extensions set(CMAKE_CXX_STANDARD 11) @@ -317,24 +310,22 @@ set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) ############### BUILD RULES +include_directories(${PROJECT_SOURCE_DIR}/include) include_directories(${PROJECT_SOURCE_DIR}/src/libaktualizr) -include_directories(${PROJECT_SOURCE_DIR}/third_party/googletest/googletest/include) -include_directories(${PROJECT_SOURCE_DIR}/third_party/jsoncpp) -include_directories(${Boost_INCLUDE_DIR}) -include_directories(${LIBOSTREE_INCLUDE_DIRS}) -include_directories(${SQLITE3_INCLUDE_DIRS}) -include_directories(${LIBP11_INCLUDE_DIR}) -include_directories(${sodium_INCLUDE_DIR}) -include_directories(${OPENSSL_INCLUDE_DIR}) -include_directories(${CURL_INCLUDE_DIR}) -include_directories(${LibArchive_INCLUDE_DIR}) - -set_source_files_properties(third_party/jsoncpp/jsoncpp.cpp PROPERTIES COMPILE_FLAGS -w) -add_library(jsoncpp OBJECT third_party/jsoncpp/jsoncpp.cpp) +include_directories(SYSTEM ${PROJECT_SOURCE_DIR}/third_party/googletest/googletest/include) +include_directories(SYSTEM ${JSONCPP_INCLUDE_DIRS}) +include_directories(SYSTEM ${Boost_INCLUDE_DIR}) +include_directories(SYSTEM ${LIBOSTREE_INCLUDE_DIRS}) +include_directories(SYSTEM ${SQLITE3_INCLUDE_DIRS}) +include_directories(SYSTEM ${LIBP11_INCLUDE_DIR}) +include_directories(SYSTEM ${sodium_INCLUDE_DIR}) +include_directories(SYSTEM ${OPENSSL_INCLUDE_DIR}) +include_directories(SYSTEM ${CURL_INCLUDE_DIR}) +include_directories(SYSTEM ${LibArchive_INCLUDE_DIR}) # General packaging configuration set(CPACK_GENERATOR "DEB") -set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Patrick Vacek ") +set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Patti Vacek ") set(CPACK_DEB_COMPONENT_INSTALL ON) set(CPACK_COMPONENTS_GROUPING ONE_PER_GROUP) 
set(CPACK_DEBIAN_PACKAGE_VERSION ${AKTUALIZR_VERSION}) @@ -347,11 +338,15 @@ if(BUILD_SOTA_TOOLS) set(CPACK_COMPONENT_GARAGE_DEPLOY_DESCRIPTION "garage-deploy utility") set(CPACK_DEBIAN_GARAGE_DEPLOY_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_GARAGE_DEPLOY_PACKAGE_DEPENDS "openjdk-8-jre") + if(SOTA_DEBIAN_PACKAGE_DEPENDS) + set(CPACK_DEBIAN_GARAGE_DEPLOY_PACKAGE_DEPENDS ${SOTA_DEBIAN_PACKAGE_DEPENDS}) + endif() + message(STATUS "Using ${CPACK_DEBIAN_GARAGE_DEPLOY_PACKAGE_DEPENDS} as Debian package depends.") endif(BUILD_SOTA_TOOLS) set(CPACK_DEBIAN_AKTUALIZR_PACKAGE_NAME "aktualizr") set(CPACK_DEBIAN_AKTUALIZR_FILE_NAME "aktualizr.deb") # only available for CMake >= 3.6.0 -set(CPACK_COMPONENT_AKTUALIZR_DESCRIPTION "UPTANE-compliant embedded software update client\n Aktualizr communicates with OTA-Connect,\n reports device information, fetch and install package updates") +set(CPACK_COMPONENT_AKTUALIZR_DESCRIPTION "Uptane-compliant embedded software update client\n Aktualizr communicates with OTA Connect,\n reports device information, and fetches and installs package updates") set(CPACK_DEBIAN_AKTUALIZR_PACKAGE_DEPENDS "lshw") set(CPACK_DEBIAN_AKTUALIZR_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_AKTUALIZR_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/config/systemd/debian-control/postinst;${PROJECT_SOURCE_DIR}/config/systemd/debian-control/prerm;${PROJECT_SOURCE_DIR}/config/systemd/debian-control/postrm;") @@ -364,26 +359,21 @@ set (AKTUALIZR_EXTERNAL_LIBS ${Boost_LIBRARIES} ${CURL_LIBRARIES} ${OPENSSL_LIBRARIES} - ${CMAKE_THREAD_LIBS_INIT} + Threads::Threads ${sodium_LIBRARY_RELEASE} ${LIBOSTREE_LIBRARIES} ${SQLITE3_LIBRARIES} ${LibArchive_LIBRARIES} ${LIBP11_LIBRARIES} - ${GLIB2_LIBRARIES} - ${SYSTEMD_LIBRARY} - ${ZLIB_LIBRARY}) - -if(ANDROID) - list(APPEND AKTUALIZR_EXTERNAL_LIBS liblog.so) -endif() + ${GLIB2_LIBRARIES}) get_directory_property(hasParent PARENT_DIRECTORY) if(hasParent) set (AKTUALIZR_EXTERNAL_LIBS ${AKTUALIZR_EXTERNAL_LIBS} PARENT_SCOPE) endif() -set 
(TEST_LIBS ${AKTUALIZR_EXTERNAL_LIBS} gtest gmock) +include(AddAktualizrTest) +set (TEST_LIBS gtest gmock testutilities aktualizr_lib) if(BUILD_WITH_CODE_COVERAGE) set(COVERAGE_LCOV_EXCLUDES '/usr/include/*' ${CMAKE_BINARY_DIR}'*' ${CMAKE_SOURCE_DIR}'/third_party/*' ${CMAKE_SOURCE_DIR}'/tests/*' '*_test.cc') include(CodeCoverage) @@ -395,19 +385,19 @@ ENABLE_TESTING() # It would be great to use GTEST_OUTPUT directly, but I couldn't get it to work. set(GOOGLE_TEST_OUTPUT --gtest_output=xml:${CMAKE_BINARY_DIR}/results/) -add_subdirectory("fuzz") +# amalgamate jsoncpp source at compile time +# note: jsoncpp has CMake support that they intend to deprecate and is hard to +# integrate with. +execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/scripts/amalgamate-jsoncpp.sh ${CMAKE_CURRENT_SOURCE_DIR}/third_party/jsoncpp ${CMAKE_CURRENT_BINARY_DIR}/jsoncpp) +include_directories(${PROJECT_BINARY_DIR}/jsoncpp) +# jsoncpp triggers a number of warnings that are turned on by default in our build +set_source_files_properties(${PROJECT_BINARY_DIR}/jsoncpp/jsoncpp.cc PROPERTIES + COMPILE_FLAGS "-Wno-error -Wno-float-equal -Wno-switch-default -Wno-deprecated-declarations") +add_library(jsoncpp OBJECT ${PROJECT_BINARY_DIR}/jsoncpp/jsoncpp.cc) + add_subdirectory("config") -if (BUILD_LOAD_TESTS) - # HdrHistogram installs support libraries, which creates junk the aktualizr - # recipe would have to remove - # Temporarily remove flag not supported by isotp-c and bitfield. 
- remove_definitions(-Wconversion) - add_subdirectory("third_party/HdrHistogram_c/src") - add_definitions(-Wconversion) -endif(BUILD_LOAD_TESTS) add_subdirectory("src") add_subdirectory("tests" EXCLUDE_FROM_ALL) - add_subdirectory("docs/doxygen") # Check if some source files were not added sent to `aktualizr_source_file_checks` @@ -417,11 +407,6 @@ add_subdirectory("docs/doxygen") file(GLOB_RECURSE ALL_SOURCE_FILES RELATIVE ${CMAKE_SOURCE_DIR} src/*.cc src/*.c src/*.h) foreach(FILE ${ALL_SOURCE_FILES}) - string (FIND ${FILE} "/isotp_conn/" EXCLUDE_DIR_FOUND) - if (NOT ${EXCLUDE_DIR_FOUND} EQUAL -1) - continue() - endif() - list(FIND AKTUALIZR_CHECKED_SRCS ${FILE} INDEX) if (${INDEX} EQUAL "-1") message(FATAL_ERROR "${FILE} not checked") @@ -434,33 +419,4 @@ add_custom_target(tags COMMAND ctags -R --c++-kinds=+p --fields=+iaS --extra=+q src WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) -####### Fuzz tags -option(ENABLE_SANITIZERS "Enables AddressSanitizer and UndefinedBehaviorSanitizer." OFF) - -include(CheckCCompilerFlag) -if (ENABLE_SANITIZERS) - list(APPEND custom_compiler_flags - -fno-omit-frame-pointer - -fsanitize=address - -fsanitize=undefined - -fsanitize=float-divide-by-zero - -fsanitize=float-cast-overflow - -fsanitize-address-use-after-scope - -fsanitize=integer - -01 - -fno-sanitize-recover - ) -endif() - -# apply custom compiler flags -foreach(compiler_flag ${custom_compiler_flags}) - #remove problematic characters - string(REGEX REPLACE "[^a-zA-Z0-9]" "" current_variable ${compiler_flag}) - - CHECK_C_COMPILER_FLAG(${compiler_flag} "FLAG_SUPPORTED_${current_variable}") - if (FLAG_SUPPORTED_${current_variable}) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${compiler_flag}") - endif() -endforeach() - # vim: set tabstop=4 shiftwidth=4 expandtab: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index df6d339292..1e8dc3af9e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,41 +13,84 @@ Code quality and style All code should be developed according to the [Google C++ 
style guide](https://google.github.io/styleguide/cppguide.html). In addition, the code should conform to the following guidelines: - * Code should be covered by tests - - Wherever possible, automated testing should be used - - Tests cases should at least exercise all documented requirements - - If automated testing is not possible, manual test cases should be described - * It must be easy for a developer checking out a project to run the test suite based on the information in the [readme](README.adoc) - * All code must pass all unit tests before a merge request is made - - Tests that don't pass should be marked pending (with justification) or should be fixed. - * All code must pass formatting and static linting tests - * Features should be developed in feature branches - * Only working code should go into the master branch. - - master should always be in a deployable state - - Undeployable code should stay on feature branches - - Code failing the active unit or integration tests for a project is undeployable - - Functionally incomplete code is not necessarily undeployable - * Feature branches should only contain single features - - Developers should not create large, month-long, multiple-feature branches - - Developers should try to have their code merged at least once every week - * All code must be reviewed before it is merged to a master branch. - - The reviewer can not be the code author - - All the reviewer's concerns must be resolved to reviewer's satisfaction - - Reviews should enforce the coding guidelines for the project - * Bugs reported against code on a master branch should be reproduced with a failing test case before they are resolved - - Reviewers of code including bug fixes should check that a covering test has been included with the fix - * New features, bug fixes, and removed functionality should be documented in the [changelog](CHANGELOG.md) +* All code should be covered by tests. + - Ideally, all tests should be automated and run in CI. 
+ - If automated testing is not possible, manual test cases should be described.
+ - Test cases should at least exercise all documented requirements.
+* It must be easy for a new developer to run the test suite based on the information in the [readme](README.adoc).
+* All code must pass formatting and static linting tests.
+* Only working code that passes all tests in CI will be merged into the master branch.
+ - The master branch should always be in a deployable state.
+ - Functionally incomplete code is not necessarily undeployable.
+* Pull requests should only contain a single feature, fix, or refactoring.
+ - Developers should not create large, month-long, multiple-feature branches.
+ - The larger the PR, the harder it is to review, the more likely it will need to be rebased, and the harder it is to rebase it.
+ - If a PR changes thousands of lines of code, please consider splitting it into smaller PRs.
+ - Developers should aim to have their code merged at least once every week.
+ - Multiple small fixes or refactorings can be grouped together in one PR, but each independent fix or refactoring should be a unique commit.
+* Make separate commits for logically separate changes within a PR.
+ - Ideally, each commit should be buildable and should pass the tests (to simplify git bisect).
+ - The short description (first line of the commit text) should not exceed 72 chars. The rest of the text (if applicable) should be separated by an empty line.
+* All code must be reviewed before it is merged to the master branch.
+ - The reviewer cannot be the code author.
+ - All the reviewer's concerns must be resolved to the reviewer's satisfaction.
+ - Reviews should enforce the coding guidelines for the project.
+* Bugs should be reproduced with a failing test case before they are resolved.
+ - Reviewers of code including bug fixes should check that a corresponding test has been included with the fix.
+* Code that is unfinished or that requires further review should be indicated as such with a `TODO` comment. + - If appropriate, this comment should include a reference to a Jira ticket that describes the work to be done in detail. + - Since external contributors do not have access to our Jira, the comment must at least briefly describe the work to be done. +* New features, bug fixes, and removed functionality should be documented in the [changelog](CHANGELOG.md). Making a Pull Request ---- -When you start developing a feature, please create a feature branch that includes the type of branch, the ID of the issue or ticket if available, and a brief description. For example `feat/9/https-support`, `fix/OTA-123/fix-token-expiry` or `refactor/tidy-up-imports`. Please do not mix feature development, bugfixes and refactoring into the same branch. +When you start developing a feature, please create a feature branch that includes the type of branch, the ID of the github issue or Jira ticket if available, and a brief description. For example `feat/9/https-support`, `fix/OTA-123/fix-token-expiry` or `refactor/tidy-up-imports`. Please do not mix feature development, bugfixes and refactoring into the same branch. When your feature is ready, push the branch and make a pull request. We will review the request and give you feedback. Once the code passes the review it can be merged into master and the branch can be deleted. +Continuous Integration (CI) +---- + +We currently have two CI servers: Travis CI and gitlab. Travis CI is usually slower and flakier, and we don't run the tests that require provisioning credentials on it, but it is publicly accessible. Gitlab is more powerful and is the source of truth, but it is inaccessible to external contributors. Normally, we expect PRs to pass CI on both CI servers, but if Travis CI is particularly unreliable, we sometimes make exceptions and ignore it. + +PRs from external contributors will not automatically trigger CI on gitlab. 
If the PR is small and we believe that passing Travis CI is good enough, we will merge it if that succeeds. Otherwise, we can trigger gitlab to run CI on your PR by manually pushing the branch to gitlab. + +PRs that only affect documentation do not strictly need to pass CI. As such, gitlab does not run CI on branches that start with "docs/". Please do not make code changes in a branch with that prefix. + Developer Certificate of Origin (DCO) ---- -All commits in pull requests must contain a `Signed-off-by:` line to indicate that the developer has agreed to the terms of the [Developer Certificate of Origin](https://developercertificate.org) (see [readme](README.adoc) for more details). A simple way to achieve that is to use the `-s` flag of `git commit`. +All commits in pull requests must contain a `Signed-off-by:` line to indicate that the developer has agreed to the terms of the [Developer Certificate of Origin](https://developercertificate.org): + +~~~~ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +~~~~ + +A simple way to sign off is to use the `-s` flag of `git commit`. New pull requests will automatically be checked by the [probot/dco](https://probot.github.io/apps/dco/). diff --git a/LICENSE b/LICENSE index a612ad9813..14e2f777f6 100644 --- a/LICENSE +++ b/LICENSE @@ -35,7 +35,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" diff --git a/README.adoc b/README.adoc index 0d39fc827a..a24bca7fcd 100644 --- a/README.adoc +++ b/README.adoc @@ -1,7 +1,7 @@ :toc: macro :toc-title: -https://opensource.org/licenses/MPL-2.0[image:https://img.shields.io/badge/License-MPL%202.0-brightgreen.svg[License: MPL 2.0]] https://travis-ci.org/advancedtelematic/aktualizr[image:https://travis-ci.org/advancedtelematic/aktualizr.svg?branch=master[TravisCI Build Status]] https://codecov.io/gh/advancedtelematic/aktualizr[image:https://codecov.io/gh/advancedtelematic/aktualizr/branch/master/graph/badge.svg[codecov]] https://bestpractices.coreinfrastructure.org/projects/674[image:https://bestpractices.coreinfrastructure.org/projects/674/badge[CII Best Practices]] https://github.com/RichardLitt/standard-readme[image:https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat[standard-readme compliant]] +https://opensource.org/licenses/MPL-2.0[image:https://img.shields.io/badge/License-MPL%202.0-brightgreen.svg[License: MPL 2.0]] 
https://app.codecov.io/gh/uptane/aktualizr[image:https://codecov.io/gh/uptane/aktualizr/branch/master/graph/badge.svg[codecov]] https://bestpractices.coreinfrastructure.org/projects/674[image:https://bestpractices.coreinfrastructure.org/projects/674/badge[OpenSSF Best Practices]] https://github.com/RichardLitt/standard-readme[image:https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat[standard-readme compliant]] [discrete] = aktualizr @@ -10,7 +10,7 @@ https://opensource.org/licenses/MPL-2.0[image:https://img.shields.io/badge/Licen C++ implementation of https://uptane.github.io[Uptane] OTA update client. ==== -The client is intended to be installed on devices that wish to receive OTA updates from an Uptane-compatible OTA server such as https://connect.ota.here.com/[HERE OTA Connect]. It is most commonly built by using the https://github.com/advancedtelematic/meta-updater[meta-updater] layer in a Yocto environment. You can use aktualizr as a stand-alone system tool or you can integrate libaktualizr into a larger project. +The client is intended to be installed on devices that wish to receive OTA updates from an Uptane-compatible OTA server such as https://connect.ota.here.com/[HERE OTA Connect]. It is most commonly built by using the https://github.com/uptane/meta-updater[meta-updater] layer in a Yocto environment. You can use aktualizr as a stand-alone system tool or you can integrate libaktualizr into a larger project. 
The client is responsible for: @@ -41,30 +41,29 @@ This client is aligned with the https://uptane.github.io[Uptane] security framew To install the minimal requirements on Debian/Ubuntu, run this: ---- -sudo apt install asn1c build-essential cmake curl libarchive-dev libboost-dev libboost-filesystem-dev libboost-log-dev libboost-program-options-dev libcurl4-openssl-dev libpthread-stubs0-dev libsodium-dev libsqlite3-dev libssl-dev +sudo apt install asn1c build-essential cmake curl libarchive-dev libboost-dev libboost-filesystem-dev libboost-log-dev libboost-program-options-dev libcurl4-openssl-dev libpthread-stubs0-dev libsodium-dev libsqlite3-dev libssl-dev python3 ---- The default versions packaged in recent Debian/Ubuntu releases are generally new enough to be compatible. If you are using older releases or a different variety of Linux, there are a few known minimum versions: * cmake (>= 3.5) * curl (>= 7.47) +* openssl (>= 1.0.2) * libboost-* (>= 1.58.0) * libcurl4-openssl-dev (>= 7.47) * libpthread-stubs0-dev (>= 0.3) Additional packages are used for non-essential components: -* To build the garage tools, you will need `python3`. * To build the test suite, you will need `net-tools python3-dev python3-openssl python3-venv sqlite3 valgrind`. -* To run the linting tools, you will need `clang clang-format-6.0 clang-tidy-6.0`. +* To run the linting tools, you will need `clang clang-format-11 clang-tidy-11`. * To build additional documentation, you will need `doxygen graphviz`. * To build with code coverage, you will need `lcov`. Some features also require additional packages: * For OSTree support, you will need `libostree-dev` (>= 2017.7). -* For PKCS#11 support, you will need `libp11-2 libp11-dev`. -* For systemd support for secondaries, you will need `libsystemd-dev`. +* For PKCS#11 support, you will need `libp11-3 libp11-dev`. * For fault injection, you will need `fiu-utils libfiu-dev`. 
==== Mac support @@ -76,10 +75,37 @@ brew tap advancedtelematic/otaconnect brew install aktualizr ---- +You can build and install the latest development version of aktualizr on MacOS (current head of the development branch): +---- +brew tap advancedtelematic/otaconnect +brew install --HEAD aktualizr +---- + +If any of the previous release versions of aktualizr has been installed before make sure you `unlink` it prior to installing the HEAD version: +---- +brew unlink aktualizr +brew install --HEAD aktualizr +---- + +You can switch back to the release version by unlinking and installing again: +---- +brew unlink aktualizr +brew install aktualizr +---- + You can also build it yourself, with basic dependencies from homebrew. You can install the necessary dependencies as follows: ---- -brew install asn1c boost cmake libarchive libsodium pkgconfig python3 +brew install asn1c boost cmake libarchive libsodium pkgconfig python3 openssl@1.1 +---- + +and run the following from the aktualizr project directory: +---- +export CXXFLAGS=-w +cmake -S . -B build -DBoost_USE_MULTITHREADED=ON +cmake --build build --target all -- -j8 + +./build/src/aktualizr_primary/aktualizr --version ---- If you also want to compile the SOTA tools: @@ -95,7 +121,7 @@ and run cmake with `-DBUILD_SOTA_TOOLS=ON`. This project uses *git submodules*. 
To checkout the code: ---- -git clone --recursive https://github.com/advancedtelematic/aktualizr +git clone --recursive https://github.com/uptane/aktualizr cd aktualizr ---- @@ -119,7 +145,7 @@ You can then build the project from the `build` directory using Make: make ---- -You can also create a link:docs/deb-package-install.adoc[debian package]: +You can also create a link:docs/ota-client-guide/modules/ROOT/pages/deb-package-install.adoc[debian package]: ---- make package @@ -148,7 +174,7 @@ Note that, by default, the compilation and tests run in sequence and the output CTEST_OUTPUT_ON_FAILURE=1 CTEST_PARALLEL_LEVEL=8 make -j8 qa ---- -Some tests require additional setups, such as code coverage, HSM emulation or provisioning credentials (link:docs/provisioning-methods-and-credentialszip.adoc[]). The exact reference about these steps is the link:scripts/test.sh[main test script] used for CI. It is parametrized by a list of environment variables and is used by our CI environments. To use it, run it in the project's root directory: +Some tests require additional setups, such as code coverage, HSM emulation or link:docs/ota-client-guide/modules/ROOT/pages/provisioning-methods-and-credentialszip.adoc[provisioning credentials]. The exact reference about these steps is the link:scripts/test.sh[main test script] used for CI. It is parametrized by a list of environment variables and is used by our CI environments. To use it, run it in the project's root directory: ---- ./scripts/test.sh @@ -211,7 +237,7 @@ To run the aktualizr client, you will need to provide a toml-formatted configura aktualizr -c ---- -Additional command line options can be found in the code (see link:../src/aktualizr_primary/main.cc[]) or by running `aktualizr --help`. More details on configuring aktualizr can be found in link:docs/configuration.adoc[]. 
If you are using https://github.com/advancedtelematic/meta-updater[meta-updater], more information about configuring aktualizr in that environment can be found there. +Additional command line options can be found link:./src/aktualizr_primary/main.cc[in the code] or by running `aktualizr --help`. More details on configuring aktualizr can be found in link:docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc[]. If you are using https://github.com/advancedtelematic/meta-updater[meta-updater], more information about configuring aktualizr in that environment can be found there. [#fake-device] === Running a "fake" device @@ -226,7 +252,11 @@ Some more detailed instructions on how to configure a fake device can be found o === Provisioning -If you intend to use aktualizr to authenticate with a server, you will need some form of provisioning. Aktualizr currently supports provisioning with shared credentials or with device credentials. Device credential provisioning supports using an HSM to store private keys. The differences and details are explained in link:docs/client-provisioning-methods.adoc[] and link:docs/provision-with-device-credentials.adoc[]. You can learn more about the credentials files used to support provisioning in link:docs/provisioning-methods-and-credentialszip.adoc[]. +If you intend to use aktualizr to authenticate with a server, you will need some form of provisioning. Aktualizr currently supports provisioning with shared credentials or with device credentials. Device credential provisioning supports using an HSM to store private keys. The differences and details are explained in link:docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc[] and link:docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc[]. You can learn more about the credentials files used to support provisioning in link:docs/ota-client-guide/modules/ROOT/pages/provisioning-methods-and-credentialszip.adoc[]. 
+ +== Changelog + +The changelog is available in link:CHANGELOG.md[]. == Maintainers @@ -236,42 +266,8 @@ This code is maintained by the OTA team at https://www.here.com/products/automot Complete contribution guidelines can be found in link:CONTRIBUTING.md[]. -== Changelog - -A changelog can be found in link:CHANGELOG.md[]. - == License -This code is licensed under the link:LICENSE[Mozilla Public License 2.0], a copy of which can be found in this repository. All code is copyright HERE Europe B.V., 2016-2019. - -We also require that contributors accept the terms of Linux Foundation's link:https://developercertificate.org/[Developer Certificate of Origin]: - ----- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. ----- +This code is licensed under the link:LICENSE[Mozilla Public License 2.0], a copy of which can be found in this repository. All code is copyright HERE Europe B.V., 2016-2020. 
-Specific instructions can be found in link:CONTRIBUTING.md[] +We require that contributors accept the terms of Linux Foundation's link:https://developercertificate.org/[Developer Certificate of Origin]. Specific instructions can be found in link:CONTRIBUTING.md[]. diff --git a/actions.md b/actions.md index 87bad28895..1bff551d71 100644 --- a/actions.md +++ b/actions.md @@ -6,14 +6,14 @@ These are the primary actions that a user of libaktualizr can perform through th - [x] Initialization - [ ] Set boot count to 0 to indicate successful boot - - [x] Detect a system reboot on the primary, if expected (bootloader_test.cc) - - [x] Initialize secondaries - - [x] Add secondaries from configuration (uptane_test.cc) - - [x] Parse secondary config files in JSON format (config_test.cc) - - [x] Create secondary object - - [x] Create a virtual secondary for testing (uptane_secondary_test.cc) - - [x] Add secondaries via API (aktualizr_test.cc) - - [x] Adding multiple secondaries with the same serial throws an error (uptane_test.cc) + - [x] Detect a system reboot on the Primary, if expected (bootloader_test.cc) + - [x] Initialize Secondaries + - [x] Add Secondaries from configuration (uptane_test.cc) + - [x] Parse Secondary config files in JSON format (config_test.cc) + - [x] Create Secondary object + - [x] Create a virtual Secondary for testing (uptane_secondary_test.cc) + - [x] Add Secondaries via API (aktualizr_test.cc) + - [x] Adding multiple Secondaries with the same serial throws an error (uptane_test.cc) - [x] Initialize device ID - [x] Use a provided device ID (OTA-985, uptane_init_test.cc) - [x] Generate a random device ID (OTA-986, utils_test.cc, uptane_init_test.cc) @@ -33,26 +33,26 @@ These are the primary actions that a user of libaktualizr can perform through th - [x] Generate RSA keypairs via PKCS#11 (crypto_test.cc, keymanager_test.cc) - [x] Read a TLS certificate via PKCS#11 (crypto_test.cc) - [x] Sign and verify a file with RSA via PKCS#11 (crypto_test.cc, 
keymanager_test.cc) - - [x] Initialize primary ECU keys - - [x] Generate primary ECU keys (OTA-989, uptane_serial_test.cc) + - [x] Initialize Primary ECU keys + - [x] Generate Primary ECU keys (OTA-989, uptane_serial_test.cc) - [x] Generate RSA 2048 key pairs (crypto_test.cc) - [x] Generate RSA 4096 key pairs (crypto_test.cc) - [x] Generate ED25519 key pairs (crypto_test.cc) - - [x] Initialize primary ECU serial - - [x] Use a provided primary serial (OTA-988, config_test.cc) - - [x] Generate primary serial (OTA-989, uptane_serial_test.cc) + - [x] Initialize Primary ECU serial + - [x] Use a provided Primary serial (OTA-988, config_test.cc) + - [x] Generate Primary serial (OTA-989, uptane_serial_test.cc) - [x] Use a provided hardware ID (uptane_test.cc) - [x] Use the system hostname as hardware ID if one is not provided (uptane_init_test.cc) - [x] Read the hostname from the system (utils_test.cc) - - [x] Register ECUs with director - - [x] Register primary ECU with director (uptane_test.cc) - - [x] Register secondary ECUs with director (uptane_test.cc) + - [x] Register ECUs with Director + - [x] Register Primary ECU with Director (uptane_test.cc) + - [x] Register Secondary ECUs with Director (uptane_test.cc) - [x] Abort if initialization fails - [x] Recover from partial provisioning and network loss (OTA-991, uptane_network_test.cc, uptane_key_test.cc) - [x] Detect and recover from failed provisioning (uptane_init_test.cc) - - [x] Verify secondaries against storage - - [x] Identify previously unknown secondaries (uptane_test.cc) - - [x] Identify currently unavailable secondaries (uptane_test.cc) + - [x] Verify Secondaries against storage + - [x] Identify previously unknown Secondaries (uptane_test.cc) + - [x] Identify currently unavailable Secondaries (uptane_test.cc) - [x] Send system/network info to server - [x] Read hardware info from the system (utils_test.cc) - [x] Send hardware info to the server (OTA-984, uptane_test.cc) @@ -82,20 +82,20 @@ These are the 
primary actions that a user of libaktualizr can perform through th - [x] Send CampaignPostponeComplete event - [x] Fetch metadata from server - [x] Generate and send manifest (see below) - - [x] Fetch metadata from the director (uptane_test.cc, uptane_vector_tests.cc) - - [x] Check metadata from the director (uptane_test.cc, uptane_vector_tests.cc) + - [x] Fetch metadata from the Director (uptane_test.cc, uptane_vector_tests.cc) + - [x] Check metadata from the Director (uptane_test.cc, uptane_vector_tests.cc) - [x] Validate Uptane metadata (see below) - [x] Identify targets for known ECUs (uptane_test.cc, uptane_vector_tests.cc) - [x] Ignore updates for unrecognized ECUs (uptane_test.cc uptane_vector_tests.cc) - - [x] Fetch metadata from the images repo (uptane_test.cc, uptane_vector_tests.cc) - - [x] Check metadata from the images repo (uptane_test.cc, uptane_vector_tests.cc) + - [x] Fetch metadata from the Image repo (uptane_test.cc, uptane_vector_tests.cc) + - [x] Check metadata from the Image repo (uptane_test.cc, uptane_vector_tests.cc) - [x] Validate Uptane metadata (see below) - [x] Check for updates - - [x] Check metadata from the director (uptane_test.cc, uptane_vector_tests.cc) + - [x] Check metadata from the Director (uptane_test.cc, uptane_vector_tests.cc) - [x] Validate Uptane metadata (see below) - [x] Identify updates for known ECUs (uptane_test.cc, uptane_vector_tests.cc) - [x] Ignore updates for unrecognized ECUs (uptane_test.cc uptane_vector_tests.cc) - - [x] Check metadata from the images repo (uptane_test.cc, uptane_vector_tests.cc) + - [x] Check metadata from the Image repo (uptane_test.cc, uptane_vector_tests.cc) - [x] Validate Uptane metadata (see below) - [x] Send UpdateCheckComplete event with available updates (aktualizr_test.cc) - [x] Send UpdateCheckComplete event after successful check with no available updates (aktualizr_test.cc) @@ -136,37 +136,37 @@ These are the primary actions that a user of libaktualizr can perform through th - 
[x] List targets in storage via API (aktualizr_test.cc) - [x] Remove targets in storage via API(aktualizr_test.cc) - [x] Install updates - - [x] Send metadata to secondary ECUs (uptane_test.cc) + - [x] Send metadata to Secondary ECUs (uptane_test.cc) - [x] Identify ECU for each target (uptane_test.cc, aktualizr_test.cc) - [x] Reject targets which do not match a known ECU (uptane_test.cc) - - [x] Install updates on primary - - [x] Check if there are updates to install for the primary (uptane_test.cc, aktualizr_test.cc) + - [x] Install updates on Primary + - [x] Check if there are updates to install for the Primary (uptane_test.cc, aktualizr_test.cc) - [x] Check if an update is already installed (uptane_test.cc) - [ ] Set boot count to 0 and rollback flag to 0 to indicate system update - - [x] Send InstallStarted event for primary (aktualizr_test.cc) - - [x] Send EcuInstallationStartedReport to server for primary (uptane_test.cc, aktualizr_test.cc) + - [x] Send InstallStarted event for Primary (aktualizr_test.cc) + - [x] Send EcuInstallationStartedReport to server for Primary (uptane_test.cc, aktualizr_test.cc) - [x] Send an event report (see below) - - [x] Install an update on the primary - - [x] Install an OSTree update on the primary (aktualizr_fullostree_test.cc) + - [x] Install an update on the Primary + - [x] Install an OSTree update on the Primary (aktualizr_fullostree_test.cc) - [ ] Notify "reboot needed" after an OSTree update trigger - [x] Set new version to pending status after an OSTree update trigger (aktualizr_test.cc) - [x] Send EcuInstallationAppliedReport to server after an OSTree update trigger (aktualizr_test.cc) - [x] Uptane check for updates and manifest sends are disabled while an installation is pending reboot (aktualizr_test.cc) - - [ ] Trigger a system reboot at the end of the installation process in case of the ostree package manager usage (OTA-2135) + - [ ] Trigger a system reboot at the end of the installation process in case of the OSTree 
package manager usage (OTA-2135) - [x] Emulate a reboot at the end of the installation process in case of the fake package manager usage (OTA-2135, aktualizr_test.cc) - - [x] Install a binary update on the primary (uptane_test.cc, aktualizr_test.cc) - - [x] Store installation result for primary (uptane_test.cc) - - [x] Send InstallTargetComplete event for primary (aktualizr_test.cc) - - [x] Send EcuInstallationCompletedReport to server for primary (uptane_test.cc, aktualizr_test.cc) + - [x] Install a binary update on the Primary (uptane_test.cc, aktualizr_test.cc) + - [x] Store installation result for Primary (uptane_test.cc) + - [x] Send InstallTargetComplete event for Primary (aktualizr_test.cc) + - [x] Send EcuInstallationCompletedReport to server for Primary (uptane_test.cc, aktualizr_test.cc) - [x] Send an event report (see below) - - [x] Install updates on secondaries - - [x] Send InstallStarted event for secondaries (aktualizr_test.cc) - - [x] Send EcuInstallationStartedReport to server for secondaries (uptane_test.cc) + - [x] Install updates on Secondaries + - [x] Send InstallStarted event for Secondaries (aktualizr_test.cc) + - [x] Send EcuInstallationStartedReport to server for Secondaries (uptane_test.cc) - [x] Send an event report (see below) - - [x] Send images to secondary ECUs (aktualizr_test.cc) - - [x] Store installation result for secondary (aktualizr_test.cc) - - [x] Send InstallTargetComplete event for secondaries (aktualizr_test.cc) - - [x] Send EcuInstallationCompletedReport to server for secondaries (aktualizr_test.cc) + - [x] Send images to Secondary ECUs (aktualizr_test.cc) + - [x] Store installation result for Secondary (aktualizr_test.cc) + - [x] Send InstallTargetComplete event for Secondaries (aktualizr_test.cc) + - [x] Send EcuInstallationCompletedReport to server for Secondaries (aktualizr_test.cc) - [x] Send an event report (see below) - [x] Store installation result for device (uptane_test.cc) - [x] Compute device installation 
failure code as concatenation of ECU failure codes (aktualizr_test.cc) @@ -200,13 +200,13 @@ These are internal requirements that are relatively opaque to the user and/or co - [x] Validate TUF roles (tuf_test.cc) - [x] Delegated roles have custom names (tuf_test.cc) - [x] Reject delegated role names that are identical to reserved role names (tuf_test.cc) - - [x] Validate a TUF root (tuf_test.cc, uptane_test.cc) - - [x] Throw an exception if a TUF root is invalid - - [x] Throw an exception if a TUF root is unsigned (tuf_test.cc, uptane_test.cc) - - [x] Throw an exception if a TUF root has no roles (tuf_test.cc) - - [x] Throw an exception if a TUF root has unknown signature types (uptane_test.cc) - - [x] Throw an exception if a TUF root has invalid key IDs (uptane_test.cc) - - [x] Throw an exception if a TUF root signature threshold is invalid (uptane_test.cc) + - [x] Validate Root metadata (tuf_test.cc, uptane_test.cc) + - [x] Throw an exception if Root metadata is invalid + - [x] Throw an exception if Root metadata is unsigned (tuf_test.cc, uptane_test.cc) + - [x] Throw an exception if Root metadata has no roles (tuf_test.cc) + - [x] Throw an exception if Root metadata has unknown signature types (uptane_test.cc) + - [x] Throw an exception if Root metadata has invalid key IDs (uptane_test.cc) + - [x] Throw an exception if Root metadata signature threshold is invalid (uptane_test.cc) - [x] Parse Uptane timestamps (types_test.cc) - [x] Throw an exception if an Uptane timestamp is invalid (types_test.cc) - [x] Get current time (types_test.cc) @@ -224,18 +224,18 @@ These are internal requirements that are relatively opaque to the user and/or co - [x] Accept update with rotated Uptane roots (uptane_vector_tests.cc) - [x] Abort update with incorrectly rotated Uptane roots (uptane_vector_tests.cc) - [x] Abort update if any metadata has an invalid hardware ID (uptane_vector_tests.cc) - - [x] Abort update if the director targets metadata has an invalid ECU ID 
(uptane_vector_tests.cc) + - [x] Abort update if the Director Targets metadata has an invalid ECU ID (uptane_vector_tests.cc) - [x] Recover from an interrupted Uptane iteration (uptane_test.cc) - [x] Generate and send manifest - - [x] Get manifest from primary (uptane_test.cc) - - [x] Get primary installation result (uptane_test.cc) - - [x] Get manifest from secondaries (uptane_test.cc) - - [x] Ignore secondaries with bad signatures (uptane_test.cc) + - [x] Get manifest from Primary (uptane_test.cc) + - [x] Get Primary installation result (uptane_test.cc) + - [x] Get manifest from Secondaries (uptane_test.cc) + - [x] Ignore Secondaries with bad signatures (uptane_test.cc) - [x] Send manifest to the server (uptane_test.cc) - [x] Send an event report - [x] Generate a random UUID (utils_test.cc) - - [x] Include correlation ID from targets metadata (aktualizr_test.cc) - - [x] Correlation ID is empty if none was provided in targets metadata (aktualizr_test.cc) + - [x] Include correlation ID from Targets metadata (aktualizr_test.cc) + - [x] Correlation ID is empty if none was provided in Targets metadata (aktualizr_test.cc) - [x] Report an event to the server (reportqueue_test.cc) - [x] Report a series of events to the server (reportqueue_test.cc) - [x] Recover from errors while sending event reports (reportqueue_test.cc) @@ -252,7 +252,7 @@ These are internal requirements that are relatively opaque to the user and/or co - [x] Migrate backward through SQL schemas (sqlstorage_test.cc) - [x] Reject invalid SQL databases (sqlstorage_test.cc) - [x] Migrate from the legacy filesystem storage (sqlstorage_test.cc, uptane_test.cc) - - [x] Load and store primary keys in an SQL database (storage_common_test.cc) + - [x] Load and store Primary keys in an SQL database (storage_common_test.cc) - [x] Load and store TLS credentials in an SQL database (storage_common_test.cc) - [x] Load and store Uptane metadata in an SQL database (storage_common_test.cc) - [x] Load and store Uptane 
roots in an SQL database (storage_common_test.cc) @@ -276,19 +276,18 @@ These are internal requirements that are relatively opaque to the user and/or co - [x] Create a temporary directory (utils_test.cc) - [x] Serialize and deserialize asn1 (asn1_test.cc) - [x] Support a fake package manager for testing (packagemanagerfactory_test.cc) - - [x] ~~Support a Debian package manager~~ (packagemanagerfactory_test.cc, debianmanager_test.cc) - - [x] Support virtual partial verification secondaries for testing - - [x] Partial verification secondaries generate and store public keys (uptane_secondary_test.cc) - - [x] Partial verification secondaries can verify Uptane metadata (uptane_secondary_test.cc) + - [x] Support virtual partial verification Secondaries for testing + - [x] Partial verification Secondaries generate and store public keys (uptane_secondary_test.cc) + - [x] Partial verification Secondaries can verify Uptane metadata (uptane_secondary_test.cc) ### Expected action sequences This is just the list of sequences currently covered. It is likely that there are more worth testing, but these tests are expensive. 
- [x] Initialize -> UptaneCycle -> no updates -> no further action or events (aktualizr_test.cc) -- [x] Initialize -> UptaneCycle -> updates downloaded and installed for primary and secondary (aktualizr_test.cc) -- [x] Initialize -> UptaneCycle -> updates downloaded and installed for primary (after reboot) and secondary (aktualizr_test.cc) -- [x] Initialize -> UptaneCycle -> updates downloaded and installed for secondaries without changing the primary (aktualizr_test.cc) +- [x] Initialize -> UptaneCycle -> updates downloaded and installed for Primary and Secondary (aktualizr_test.cc) +- [x] Initialize -> UptaneCycle -> updates downloaded and installed for Primary (after reboot) and Secondary (aktualizr_test.cc) +- [x] Initialize -> UptaneCycle -> updates downloaded and installed for Secondaries without changing the Primary (aktualizr_test.cc) - [x] Initialize -> CheckUpdates -> no updates -> no further action or events (aktualizr_test.cc) - [x] Initialize -> Download -> nothing to download (aktualizr_test.cc) - [x] Initialize -> CheckUpdates -> Download -> updates downloaded but not installed (aktualizr_test.cc) @@ -314,7 +313,7 @@ These tools all link with libaktualizr, although they do not necessary use the A ### aktualizr-secondary -`aktualizr-secondary` was designed to demonstrate an Uptane-compliant secondary but is currently not part of the core product. It also uses libaktualizr, but less extensively than `aktualizr-primary`. This is just the list of things currently tested that relate specifically to it. +`aktualizr-secondary` was designed to demonstrate an Uptane-compliant Secondary but is currently not part of the core product. It also uses libaktualizr, but less extensively than `aktualizr-primary`. This is just the list of things currently tested that relate specifically to it. 
- [x] Parse config files in TOML format (aktualizr_secondary_config_test.cc) - [x] Write its config to file or to the log (aktualizr_secondary_config_test.cc) @@ -338,38 +337,38 @@ These tools all link with libaktualizr, although they do not necessary use the A - [x] Write its config to file or to the log (aktualizr_info_config_test.cc) - [x] Print information from libaktualizr storage (aktualizr_info_test.cc) - [x] Print device ID (aktualizr_info_test.cc) - - [x] Print primary ECU serial (aktualizr_info_test.cc) - - [x] Print primary ECU hardware ID (aktualizr_info_test.cc) - - [x] Print secondary ECU serials (aktualizr_info_test.cc) - - [x] Print secondary ECU hardware IDs (aktualizr_info_test.cc) - - [x] Print secondary ECUs no longer accessible (miscofigured: old) (aktualizr_info_test.cc) - - [x] Print secondary ECUs registered after provisioning (not registered) (aktualizr_info_test.cc) + - [x] Print Primary ECU serial (aktualizr_info_test.cc) + - [x] Print Primary ECU hardware ID (aktualizr_info_test.cc) + - [x] Print Secondary ECU serials (aktualizr_info_test.cc) + - [x] Print Secondary ECU hardware IDs (aktualizr_info_test.cc) + - [x] Print Secondary ECUs no longer accessible (misconfigured: old) (aktualizr_info_test.cc) + - [x] Print Secondary ECUs registered after provisioning (not registered) (aktualizr_info_test.cc) - [x] Print provisioning status (aktualizr_info_test.cc) - [x] Print whether metadata has been fetched from the server (aktualizr_info_test.cc) - - [x] Print root metadata from images repository (aktualizr_info_test.cc) - - [x] Print targets metadata from images repository (aktualizr_info_test.cc) - - [x] Print root metadata from director repository (aktualizr_info_test.cc) - - [x] Print targets metadata from director repository (aktualizr_info_test.cc) + - [x] Print Root metadata from Image repository (aktualizr_info_test.cc) + - [x] Print Targets metadata from Image repository (aktualizr_info_test.cc) + - [x] Print Root metadata from 
Director repository (aktualizr_info_test.cc) + - [x] Print Targets metadata from Director repository (aktualizr_info_test.cc) - [x] Print TLS credentials (aktualizr_info_test.cc) - - [x] Print primary ECU keys (aktualizr_info_test.cc) - - [x] Print primary ECU current and pending versions (aktualizr_info_test.cc) - - [x] Print secondary ECU current and pending versions (aktualizr_info_test.cc) + - [x] Print Primary ECU keys (aktualizr_info_test.cc) + - [x] Print Primary ECU current and pending versions (aktualizr_info_test.cc) + - [x] Print Secondary ECU current and pending versions (aktualizr_info_test.cc) - [x] Print device name only for scripting purposes (aktualizr_info_test.cc) - [x] Print delegations (aktualizr_info_test.cc) - - [x] Print snapshot (aktualizr_info_test.cc) - - [x] Print timestamp (aktualizr_info_test.cc) + - [x] Print Snapshot metadata (aktualizr_info_test.cc) + - [x] Print Timestamp metadata (aktualizr_info_test.cc) ### uptane-generator `uptane-generator` is used in testing to simulate the generation of Uptane repositories. 
-- [x] Generate images and director repos (repo_test.cc) -- [x] Add an image to the images repo (repo_test.cc) +- [x] Generate Image and Director repos (repo_test.cc) +- [x] Add an image to the Image repo (repo_test.cc) - [x] Add custom image metadata without an actual file (repo_test.cc) -- [x] Copy an image to the director repo (repo_test.cc) - - [x] Clear the staged director targets metadata (repo_test.cc) - - [x] Populate the director targets metadata with the currently signed metadata (repo_test.cc) -- [x] Sign director repo targets (repo_test.cc) +- [x] Copy an image to the Director repo (repo_test.cc) + - [x] Clear the staged Director Targets metadata (repo_test.cc) + - [x] Populate the Director Targets metadata with the currently signed metadata (repo_test.cc) +- [x] Sign Director repo Targets metadata (repo_test.cc) - [x] Add simple delegation (repo_test.cc) - [x] Add image with delegation (repo_test.cc) - [x] Sign arbitrary metadata (repo_test.cc) @@ -399,15 +398,13 @@ These tools all link with libaktualizr, although they do not necessary use the A - [x] Sign device certificate with fleet private key (cert_provider_test.cc) - [x] Serialize device private key to a string (cert_provider_test.cc) - [x] Serialize device certificate to a string (cert_provider_test.cc) -- [ ] Read server root CA from credentials archive - - [ ] Read server root CA from server_ca.pem if present (to support community edition use case) - - [x] Read server root CA from p12 (cert_provider_shared_cred_test.cc) +- [x] Read server root CA from p12 in the credentials archive (cert_provider_shared_cred_test.cc) - [x] Write credentials to a local directory if requested (cert_provider_test.cc) - [x] Provide device private key (cert_provider_test.cc) - [x] Provide device certificate (cert_provider_test.cc) - [x] Provide root CA if requested (cert_provider_shared_cred_test.cc) - [x] Provide server URL if requested (cert_provider_shared_cred_test.cc) -- [ ] Copy credentials to a device with 
ssh +- [ ] Copy credentials to a device with ssh (covered by oe-selftest) - [ ] Create parent directories - [ ] Provide device private key - [ ] Provide device certificate @@ -434,7 +431,7 @@ These tools also use libaktualizr, but only for common utility functions. They a - [x] Authenticate with treehub server (see below) - [x] Fetch OSTree objects from source repository and push to destination repository (see below) - [x] Check if credentials support offline signing (authenticate_test.cc) -- [ ] Upload root ref to images repository if credentials do not support offline signing +- [ ] Upload root ref to Image repository if credentials do not support offline signing - [x] Abort when given bogus command line options (sota_tools/CMakeLists.txt, test-bad-option) - [x] Support debug logging (sota_tools/CMakeLists.txt, test-verbose-logging) @@ -459,12 +456,12 @@ These tools also use libaktualizr, but only for common utility functions. They a ### garage-check -`garage-check` simply verifies that a given OSTree commit exists on a remote Treehub server and is present in the targets.json from the images repository. +`garage-check` simply verifies that a given OSTree commit exists on a remote Treehub server and is present in the targets.json from the Image repository. - [x] Parse credentials (see below) - [x] Authenticate with treehub server (see below) - [x] Verify that a commit exists in a remote repo (sota_tools/CMakeLists.txt, run_expired_test.sh) -- [x] Get targets.json from images repository (sota_tools/CMakeLists.txt, run_expired_test.sh) +- [x] Get targets.json from Image repository (sota_tools/CMakeLists.txt, run_expired_test.sh) - [x] Abort if targets.json has expired (sota_tools/CMakeLists.txt, run_expired_test.sh) - [x] Find specified OSTree ref in targets.json (sota_tools/CMakeLists.txt, run_expired_test.sh) @@ -480,12 +477,12 @@ These tools also use libaktualizr, but only for common utility functions. 
They a - [x] Extract credentials from a provided JSON file (authenticate_test.cc) - [x] Reject a bogus provided JSON file (authenticate_test.cc) - [x] Parse authentication information from treehub.json (authenticate_test.cc) - - [x] Parse images repository URL from a provided archive (authenticate_test.cc) + - [x] Parse Image repository URL from a provided archive (authenticate_test.cc) - [ ] Parse treehub URL from a provided archive - [x] Authenticate with treehub server - [x] Authenticate with username and password (basic auth) (treehub_server_test.cc) - [x] Authenticate with OAuth2 (treehub_server_test.cc, authenticate_test.cc) - - [ ] Authenticate with TLS credentials (authenticate_test.cc [BROKEN]) + - [x] Authenticate with TLS credentials (authenticate_test.cc) - [x] Authenticate with nothing (no auth) (authenticate_test.cc) - [x] Use a provided CA certificate (sota_tools/CMakeLists.txt, test-cacert-used) - [x] Abort when given a bogus CA certificate (sota_tools/CMakeLists.txt, test-cacert-not-found) @@ -544,6 +541,6 @@ These tools also use libaktualizr, but only for common utility functions. 
They a - [x] Build an image with manual control that provisions successfully - [x] Build an image for Raspberry Pi - [x] Build an image using grub as a bootloader that provisions successfully -- [x] Build an image for a secondary -- [x] Build an image for a primary intended to connect to a secondary +- [x] Build an image for a Secondary +- [x] Build an image for a Primary intended to connect to a Secondary diff --git a/ci/gitlab/.gitlab-ci.yml b/ci/gitlab/.gitlab-ci.yml index 4b43659c80..f60c3218f9 100644 --- a/ci/gitlab/.gitlab-ci.yml +++ b/ci/gitlab/.gitlab-ci.yml @@ -1,6 +1,7 @@ stages: - docker - test + - static scans - pkg-test - oe-checkout - oe-test @@ -10,24 +11,28 @@ stages: variables: UBUNTU_BIONIC_MASTER_IMAGE: ${CI_REGISTRY_IMAGE}:ci-master-UBUNTU_BIONIC UBUNTU_XENIAL_MASTER_IMAGE: ${CI_REGISTRY_IMAGE}:ci-master-UBUNTU_XENIAL - DEBIAN_TESTING_MASTER_IMAGE: ${CI_REGISTRY_IMAGE}:ci-master-DEBIAN_TESTING + UBUNTU_FOCAL_MASTER_IMAGE: ${CI_REGISTRY_IMAGE}:ci-master-UBUNTU_FOCAL UBUNTU_BIONIC_MASTER_INSTALLIMAGE: ${CI_REGISTRY_IMAGE}:ci-install-master-UBUNTU_BIONIC UBUNTU_XENIAL_MASTER_INSTALLIMAGE: ${CI_REGISTRY_IMAGE}:ci-install-master-UBUNTU_XENIAL UBUNTU_BIONIC_PR_IMAGE: ${CI_REGISTRY_IMAGE}:ci-${CI_COMMIT_REF_SLUG}-UBUNTU_BIONIC UBUNTU_XENIAL_PR_IMAGE: ${CI_REGISTRY_IMAGE}:ci-${CI_COMMIT_REF_SLUG}-UBUNTU_XENIAL - DEBIAN_TESTING_PR_IMAGE: ${CI_REGISTRY_IMAGE}:ci-${CI_COMMIT_REF_SLUG}-DEBIAN_TESTING + UBUNTU_FOCAL_PR_IMAGE: ${CI_REGISTRY_IMAGE}:ci-${CI_COMMIT_REF_SLUG}-UBUNTU_FOCAL UBUNTU_BIONIC_PR_INSTALLIMAGE: ${CI_REGISTRY_IMAGE}:ci-install-${CI_COMMIT_REF_SLUG}-UBUNTU_BIONIC UBUNTU_XENIAL_PR_INSTALLIMAGE: ${CI_REGISTRY_IMAGE}:ci-install-${CI_COMMIT_REF_SLUG}-UBUNTU_XENIAL CCACHE_DIR: $CI_PROJECT_DIR/ccache - GIT_SUBMODULE_STRATEGY: recursive + GIT_SUBMODULE_STRATEGY: none # bitbake variables BITBAKE_IMAGE: ${METAUPDATER_REGISTRY_IMAGE}:ci-master-bitbake BITBAKE_CHECKOUT_IMAGE: ${METAUPDATER_REGISTRY_IMAGE}:ci-master-checkout include: + - template: 
SAST.gitlab-ci.yml + - template: Secret-Detection.gitlab-ci.yml + - template: Dependency-Scanning.gitlab-ci.yml + - template: License-Scanning.gitlab-ci.yml - project: 'olp/edge/ota/connect/client/meta-updater' ref: 'master' file: 'scripts/ci/gitlab/docker.yml' @@ -58,9 +63,9 @@ Docker Setup: - docker build --pull --cache-from "$UBUNTU_XENIAL_MASTER_IMAGE" --cache-from "$UBUNTU_XENIAL_PR_IMAGE" -f "$CI_PROJECT_DIR/docker/Dockerfile.ubuntu.xenial" -t "$UBUNTU_XENIAL_PR_IMAGE" . - docker push "$UBUNTU_XENIAL_PR_IMAGE" - - docker pull "$DEBIAN_TESTING_PR_IMAGE" || docker pull "$DEBIAN_TESTING_MASTER_IMAGE" || true - - docker build --pull --cache-from "$DEBIAN_TESTING_MASTER_IMAGE" --cache-from "$DEBIAN_TESTING_PR_IMAGE" -f "$CI_PROJECT_DIR/docker/Dockerfile.debian.testing" -t "$DEBIAN_TESTING_PR_IMAGE" . - - docker push "$DEBIAN_TESTING_PR_IMAGE" + - docker pull "$UBUNTU_FOCAL_PR_IMAGE" || docker pull "$UBUNTU_FOCAL_MASTER_IMAGE" || true + - docker build --pull --cache-from "$UBUNTU_FOCAL_MASTER_IMAGE" --cache-from "$UBUNTU_FOCAL_PR_IMAGE" -f "$CI_PROJECT_DIR/docker/Dockerfile.ubuntu.focal" -t "$UBUNTU_FOCAL_PR_IMAGE" . + - docker push "$UBUNTU_FOCAL_PR_IMAGE" # used for install tests - docker pull "$UBUNTU_BIONIC_PR_INSTALLIMAGE" || docker pull "$UBUNTU_BIONIC_MASTER_INSTALLIMAGE" || true @@ -71,17 +76,37 @@ Docker Setup: - docker build --pull --cache-from "$UBUNTU_XENIAL_MASTER_INSTALLIMAGE" --cache-from "$UBUNTU_XENIAL_PR_INSTALLIMAGE" -f "$CI_PROJECT_DIR/docker/Dockerfile-test-install.ubuntu.xenial" -t "$UBUNTU_XENIAL_PR_INSTALLIMAGE" . 
- docker push "$UBUNTU_XENIAL_PR_INSTALLIMAGE" +# static scans: + +license_scanning: + stage: static scans + +bandit-sast: + stage: static scans + needs: [] + +flawfinder-sast: + stage: static scans + needs: [] + +secret_detection: + stage: static scans + needs: [] + coverage: variables: + GIT_CLONE_PATH: $CI_BUILDS_DIR/aktualizr-coverage-$CI_JOB_ID + GIT_SUBMODULE_STRATEGY: 'recursive' + TEST_BUILD_DIR: 'build-coverage' TEST_CMAKE_BUILD_TYPE: 'Valgrind' TEST_WITH_COVERAGE: '1' TEST_WITH_P11: '1' - TEST_WITH_DOCKERAPP: '1' TEST_WITH_FAULT_INJECTION: '1' TEST_SOTA_PACKED_CREDENTIALS: "$CI_PROJECT_DIR/credentials.zip" image: "$UBUNTU_BIONIC_PR_IMAGE" stage: test + needs: ["Docker Setup"] except: - /^20\d\d\.\d\d?-docs$/ - /^docs\// @@ -92,17 +117,24 @@ coverage: artifacts: paths: - build-coverage/coverage/ + reports: + junit: build-coverage/report.xml script: - aws s3 cp s3://ota-gitlab-ci/hereotaconnect_prod.zip $CI_PROJECT_DIR/credentials.zip - ./scripts/test.sh + - xsltproc -o build-coverage/report.xml ./third_party/junit/ctest2junit.xsl build-coverage/Testing/**/Test.xml > /dev/null nop11: variables: + GIT_CLONE_PATH: $CI_BUILDS_DIR/aktualizr-nop11-$CI_JOB_ID + GIT_SUBMODULE_STRATEGY: 'recursive' + TEST_BUILD_DIR: 'build-nop11' TEST_CMAKE_BUILD_TYPE: 'Debug' TEST_WITH_TESTSUITE: '0' image: "$UBUNTU_BIONIC_PR_IMAGE" stage: test + needs: ["Docker Setup"] except: - /^20\d\d\.\d\d?-docs$/ - /^docs\// @@ -113,19 +145,22 @@ nop11: script: - ./scripts/test.sh -debian-build+static: +focal-build-static: variables: - TEST_BUILD_DIR: 'build-debian-testing' + GIT_CLONE_PATH: $CI_BUILDS_DIR/aktualizr-focal-build-static-$CI_JOB_ID + GIT_SUBMODULE_STRATEGY: 'recursive' + + TEST_BUILD_DIR: 'build-ubuntu-focal' TEST_CC: 'clang' # should run with valgrind but some leaks are still unfixed # TEST_CMAKE_BUILD_TYPE = 'Valgrind' TEST_CMAKE_BUILD_TYPE: 'Debug' TEST_TESTSUITE_ONLY: 'crypto' TEST_WITH_STATICTESTS: '1' - TEST_WITH_LOAD_TESTS: '1' TEST_WITH_DOCS: '1' - image: 
"$DEBIAN_TESTING_PR_IMAGE" + image: "$UBUNTU_FOCAL_PR_IMAGE" stage: test + needs: ["Docker Setup"] except: - /^20\d\d\.\d\d?-docs$/ - /^docs\// @@ -133,16 +168,18 @@ debian-build+static: key: "$CI_JOB_NAME" paths: - ccache/ + artifacts: + paths: + - build-ubuntu-focal/docs/doxygen/ + reports: + junit: build-ubuntu-focal/report.xml script: - ./scripts/test.sh + - xsltproc -o build-ubuntu-focal/report.xml ./third_party/junit/ctest2junit.xsl build-ubuntu-focal/Testing/**/Test.xml > /dev/null -bionic-pkg: - variables: - TEST_BUILD_DIR: 'build-bionic' - TEST_INSTALL_RELEASE_NAME: '-ubuntu_18.04' - TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-bionic/pkg" - image: "$UBUNTU_BIONIC_PR_IMAGE" +.u-pkg: + needs: ["Docker Setup"] stage: test except: - /^20\d\d\.\d\d?-docs$/ @@ -151,42 +188,40 @@ bionic-pkg: key: "$CI_JOB_NAME" paths: - ccache/ - artifacts: - paths: - - build-bionic/pkg script: - mkdir -p $TEST_INSTALL_DESTDIR - ./scripts/build_ubuntu.sh +bionic-pkg: + extends: .u-pkg + variables: + GIT_CLONE_PATH: $CI_BUILDS_DIR/aktualizr-bionic-pkg-$CI_JOB_ID + GIT_SUBMODULE_STRATEGY: 'recursive' + + TEST_BUILD_DIR: 'build-bionic' + TEST_INSTALL_RELEASE_NAME: '-ubuntu_18.04' + TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-bionic/pkg" + image: "$UBUNTU_BIONIC_PR_IMAGE" + artifacts: + paths: + - build-bionic/pkg + xenial-pkg: + extends: .u-pkg variables: + GIT_CLONE_PATH: $CI_BUILDS_DIR/aktualizr-xenial-pkg-$CI_JOB_ID + GIT_SUBMODULE_STRATEGY: 'recursive' + TEST_BUILD_DIR: 'build-xenial' TEST_INSTALL_RELEASE_NAME: '-ubuntu_16.04' TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-xenial/pkg" - image: "$UBUNTU_XENIAL_PR_IMAGE" - stage: test - except: - - /^20\d\d\.\d\d?-docs$/ - - /^docs\// - cache: - key: "$CI_JOB_NAME" - paths: - - ccache/ artifacts: paths: - build-xenial/pkg - script: - - mkdir -p $TEST_INSTALL_DESTDIR - - ./scripts/build_ubuntu.sh -bionic-pkg-test: - variables: - TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-bionic/pkg" - image: "$UBUNTU_BIONIC_PR_INSTALLIMAGE" - 
dependencies: - - bionic-pkg +.pkg-test: stage: pkg-test except: - /^20\d\d\.\d\d?-docs$/ @@ -195,26 +230,28 @@ bionic-pkg-test: - ./scripts/test_install_garage_deploy.sh - ./scripts/test_install_aktualizr.sh +bionic-pkg-test: + extends: .pkg-test + variables: + TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-bionic/pkg" + needs: ["bionic-pkg"] + image: "$UBUNTU_BIONIC_PR_INSTALLIMAGE" + xenial-pkg-test: + extends: .pkg-test variables: TEST_INSTALL_DESTDIR: "$CI_PROJECT_DIR/build-xenial/pkg" - + needs: ["xenial-pkg"] image: "$UBUNTU_XENIAL_PR_INSTALLIMAGE" - dependencies: - - xenial-pkg - stage: pkg-test - except: - - /^20\d\d\.\d\d?-docs$/ - - /^docs\// - script: - - ./scripts/test_install_garage_deploy.sh - - ./scripts/test_install_aktualizr.sh + # -- yocto tests OE Docker setup: extends: .bb_docker_remote stage: docker + variables: + GIT_CHECKOUT: 'false' only: variables: - $OE_PTEST @@ -228,7 +265,9 @@ OE Docker setup: OE Checkout: extends: .bb_checkout stage: oe-checkout - dependencies: [] + needs: [] + variables: + GIT_CHECKOUT: 'false' only: variables: - $OE_PTEST @@ -238,11 +277,13 @@ OE Checkout: - /^20\d\d\.\d\d?-docs$/ - /^docs\// variables: - MANIFEST: master + MANIFEST: dunfell Ptest qemux86_64: extends: .oe-selftest stage: oe-test + variables: + GIT_CHECKOUT: false dependencies: - OE Checkout allow_failure: true @@ -268,10 +309,10 @@ Ptest qemux86_64: github-release: image: "$UBUNTU_BIONIC_PR_IMAGE" stage: deploy - dependencies: - - bionic-pkg - - xenial-pkg + # focal-build-static is needed for doxygen + needs: ["bionic-pkg", "xenial-pkg", "focal-build-static"] script: + # github release - ./scripts/make_src_archive.sh ./aktualizr_src-$CI_COMMIT_TAG.tar.gz - >- ./scripts/publish_github_rls.py $CI_COMMIT_TAG @@ -280,16 +321,22 @@ github-release: build-bionic/pkg/garage_deploy-ubuntu_18.04.deb build-bionic/pkg/aktualizr-ubuntu_18.04.deb aktualizr_src-$CI_COMMIT_TAG.tar.gz + # github doxygen + - ./scripts/publish_github_docs.sh only: variables: - 
$CI_COMMIT_TAG =~ /^\d\d\d\d\.\d+(-\w+)?$/ + except: + - /^20\d\d\.\d\d?-docs$/ + variables: + DOX_DOCS: 'build-ubuntu-focal/docs/doxygen/html' + TEST_BUILD_DIR: 'build-gh-rls' # -- publish coverage results on gitlab pages pages: stage: deploy - dependencies: - - coverage + needs: ["coverage"] script: - mv build-coverage/coverage/ public/ artifacts: @@ -300,47 +347,6 @@ pages: only: - master -# -- veracode - -veracode-pkg: - # prepare build to be submitted for static code analysis - stage: test - only: - variables: - - $VERACODE_API_ID - variables: - TEST_BUILD_DIR: 'scan-build' - TEST_CMAKE_BUILD_TYPE: 'Debug' - TEST_WITH_TESTSUITE: '0' - TEST_WITH_SOTA_TOOLS: '0' - TEST_WITH_OSTREE: '0' - TEST_WITH_DEB: '0' - TEST_WITH_ISOTP: '0' - TEST_WITH_PARTIAL: '0' - image: "$UBUNTU_BIONIC_PR_IMAGE" - script: - - ./scripts/test.sh - - tar -f scan.tar --append /tmp/aktualizr/usr/local/bin - artifacts: - paths: - - scan.tar - -trigger-veracode-scan: - stage: trigger - only: - variables: - - $VERACODE_API_ID - dependencies: - - veracode-pkg - allow_failure: true - image: openjdk:8 - before_script: - # The latest wrapper version can be found in https://repo1.maven.org/maven2/com/veracode/vosp/api/wrappers/vosp-api-wrappers-java/ - - wget -q -O veracode-wrapper.jar https://repo1.maven.org/maven2/com/veracode/vosp/api/wrappers/vosp-api-wrappers-java/${VERACODE_WRAPPER_VERSION}/vosp-api-wrappers-java-${VERACODE_WRAPPER_VERSION}.jar - script: - - java -jar veracode-wrapper.jar -vid ${VERACODE_API_ID} -vkey ${VERACODE_API_KEY} - -action UploadAndScan -appname "OTA Client" -createprofile true -autoscan true - -filepath scan.tar -version "job ${CI_JOB_ID} in pipeline ${CI_PIPELINE_ID} for ${CI_PROJECT_NAME} repo" # -- e2e @@ -351,20 +357,22 @@ app-docker-image: stage: deploy only: - master - dependencies: [] + variables: + GIT_SUBMODULE_STRATEGY: recursive + needs: [] allow_failure: true before_script: - docker login -u gitlab-ci-token -p "$CI_JOB_TOKEN" "$CI_REGISTRY" script: - - sed 
's@$BUILDER@'$UBUNTU_BIONIC_MASTER_IMAGE'@' ./ci/gitlab/Dockerfile.aktualizr > ./ci/gitlab/Dockerfile - - docker build -t $CI_REGISTRY_IMAGE/app:ci-$CI_COMMIT_REF_SLUG -f ./ci/gitlab/Dockerfile . + - cp ./docker/Dockerfile.aktualizr ./ci/gitlab/Dockerfile + - docker build --build-arg AKTUALIZR_BASE=$UBUNTU_BIONIC_MASTER_IMAGE -t $CI_REGISTRY_IMAGE/app:ci-$CI_COMMIT_REF_SLUG -f ./ci/gitlab/Dockerfile . - docker push $CI_REGISTRY_IMAGE/app:ci-$CI_COMMIT_REF_SLUG -trigger-e2e-pipeline: +trigger-device-farm-pipeline: stage: trigger only: - master - trigger: olp/edge/ota/testing/ota-plus-test-driver + trigger: olp/edge/ota/testing/device-farm # -- otf @@ -372,8 +380,7 @@ trigger-otf-pipeline: image: "$UBUNTU_BIONIC_PR_IMAGE" stage: trigger when: on_success - dependencies: - - github-release + needs: ["github-release"] script: - curl -X POST -F "token=$CI_JOB_TOKEN" -F "ref=master" -F "variables[TEST_JOB_ONLY]=true" https://main.gitlab.in.here.com/api/v4/projects/163/trigger/pipeline only: @@ -391,3 +398,58 @@ trigger-docsite-build: trigger: project: olp/edge/ota/documentation/ota-connect-docs branch: master + +trigger-osx-build: + stage: trigger + trigger: + project: olp/edge/ota/connect/client/homebrew-otaconnect + branch: master + rules: + - if: $OSX_BUILD + when: always + +build-osx-release: + stage: trigger + needs: ["github-release"] + variables: + VERSION: "$CI_COMMIT_TAG" + REVISION: "$CI_COMMIT_SHA" + RELEASE_BASE_URL: "https://github.com/advancedtelematic/aktualizr/releases/download" + GITHUB_REPOSITORY: "uptane/aktualizr" + GITHUB_TOKEN: "$GITHUB_API_TOKEN" + HOMEBREW_GITHUB_API_TOKEN: "$GITHUB_API_TOKEN" + FORMULA_DIR: "/usr/local/Homebrew/Library/Taps/advancedtelematic/homebrew-otaconnect" + FORMULA_FILE: "${FORMULA_DIR}/aktualizr.rb" + before_script: + - brew uninstall -f aktualizr + - brew untap advancedtelematic/otaconnect + - rm -rf $(brew --cache)/aktualizr--git + - brew install ghr + script: + # clone a repo that contains the aktualizr formula + - brew 
tap advancedtelematic/otaconnect + # update Version and Revision in the formula + - sed -i '' -E "s/ version = \"20[1-2][0-9].[0-9]+\"/ version = \"${VERSION}\"/" ${FORMULA_FILE} + - sed -i '' -E "s/ revision = \".*\"/ revision = \"${REVISION}\"/" ${FORMULA_FILE} + # build aktualizr + - brew install -v --build-bottle aktualizr + - aktualizr --version + # create aktualizr bottle - an archive/tar.gz file along with a json file containing its metadata + - brew bottle --json --no-rebuild --force-core-tap --root-url=${RELEASE_BASE_URL}/${VERSION} aktualizr + # undo changes in the formula (version and revision) + - git -C ${FORMULA_DIR} stash + # update the formula with sha256 hash of the new bottle (the archive file) + - brew bottle --merge --write --no-commit ./aktualizr--${VERSION}.mojave.bottle.json + # update Version and Revision in the formula and create a pull request with the updated formula + # that contains the new version, revision and sha256 of the bottle/archive file + - brew bump-formula-pr -v -d -f --tag=${VERSION} --revision=${REVISION} --no-browse aktualizr + # add the bottle file to the github release artifacts + - mv aktualizr--${VERSION}.mojave.bottle.tar.gz aktualizr-${VERSION}.mojave.bottle.tar.gz + - ghr -u "${GITHUB_REPOSITORY%/*}" -r "${GITHUB_REPOSITORY#*/}" ${VERSION} aktualizr-${VERSION}.mojave.bottle.tar.gz + rules: + - if: $CI_COMMIT_TAG =~ /^20\d\d\.\d\d?-docs$/ + when: never + - if: $OSX_RELEASE && $CI_COMMIT_TAG =~ /^\d\d\d\d\.\d+(-\w+)?$/ + when: on_success + tags: + - osx diff --git a/ci/gitlab/Dockerfile.aktualizr b/ci/gitlab/Dockerfile.aktualizr deleted file mode 100644 index 84ce6a291d..0000000000 --- a/ci/gitlab/Dockerfile.aktualizr +++ /dev/null @@ -1,8 +0,0 @@ -FROM $BUILDER as builder -LABEL Description="Aktualizr application dockerfile" - -ADD . /aktualizr -WORKDIR /aktualizr/build - -RUN cmake -DFAULT_INJECTION=on -DBUILD_SOTA_TOOLS=on -DBUILD_DEB=on -DCMAKE_BUILD_TYPE=Debug .. 
-RUN make -j8 install diff --git a/cmake-modules/AddAktualizrTest.cmake b/cmake-modules/AddAktualizrTest.cmake index 4af71b6db0..aea33843ee 100644 --- a/cmake-modules/AddAktualizrTest.cmake +++ b/cmake-modules/AddAktualizrTest.cmake @@ -4,10 +4,10 @@ function(add_aktualizr_test) set(multiValueArgs SOURCES LIBRARIES ARGS LAUNCH_CMD) cmake_parse_arguments(AKTUALIZR_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(TEST_TARGET t_${AKTUALIZR_TEST_NAME}) - add_executable(${TEST_TARGET} EXCLUDE_FROM_ALL ${AKTUALIZR_TEST_SOURCES} ${PROJECT_SOURCE_DIR}/tests/test_utils.cc) + + add_executable(${TEST_TARGET} EXCLUDE_FROM_ALL ${AKTUALIZR_TEST_SOURCES}) target_link_libraries(${TEST_TARGET} ${AKTUALIZR_TEST_LIBRARIES} - aktualizr_static_lib ${TEST_LIBS}) target_include_directories(${TEST_TARGET} PUBLIC ${PROJECT_SOURCE_DIR}/tests) diff --git a/cmake-modules/CodeCoverage.cmake b/cmake-modules/CodeCoverage.cmake index 10285228ca..cc6461bdbe 100644 --- a/cmake-modules/CodeCoverage.cmake +++ b/cmake-modules/CodeCoverage.cmake @@ -156,7 +156,7 @@ function(SETUP_TARGET_FOR_COVERAGE_LCOV) COMMAND ${LCOV_PATH} --gcov-tool ${GCOV_PATH} -c -i -d . -o ${Coverage_NAME}.base # Run tests - COMMAND ${Coverage_EXECUTABLE} + COMMAND ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS} # Capturing lcov counters and generating report COMMAND ${LCOV_PATH} --gcov-tool ${GCOV_PATH} --directory . 
--capture --output-file ${Coverage_NAME}.info diff --git a/cmake-modules/FindSystemd.cmake b/cmake-modules/FindSystemd.cmake deleted file mode 100644 index 95e02869c8..0000000000 --- a/cmake-modules/FindSystemd.cmake +++ /dev/null @@ -1,19 +0,0 @@ -if (SYSTEMD_LIBRARY AND SYSTEMD_INCLUDE_DIR) - # in cache already - set(SYSTEMD_FOUND TRUE) -else (SYSTEMD_LIBRARY AND SYSTEMD_INCLUDE_DIR) - find_package(PkgConfig QUIET) - if (PKG_CONFIG_FOUND) - pkg_check_modules(systemd_PKG QUIET systemd) - set(XPREFIX systemd_PKG) - endif() - - find_path(SYSTEMD_INCLUDE_DIR - NAMES sd-daemon.h - HINTS ${${XPREFIX}_INCLUDE_DIRS} - PATH_SUFFIXES systemd) - find_library(SYSTEMD_LIBRARY - NAMES ${${XPREFIX}_LIBRARIES} systemd - HINTS ${${XPREFIX}_LIBRARY_DIRS}} - PATH_SUFFIXES systemd) -endif() diff --git a/codecov.yml b/codecov.yml index 7b5afb7a89..00d4bc41fe 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,4 +1,10 @@ +coverage: + status: + project: + default: + threshold: 0.3% ignore: - "third_party" - "tests" - "**/*_test.cc" + - "src/libaktualizr-c/test" diff --git a/config/CMakeLists.txt b/config/CMakeLists.txt index da6925fdd0..b9a582060a 100644 --- a/config/CMakeLists.txt +++ b/config/CMakeLists.txt @@ -1,20 +1,20 @@ if(BUILD_DEB) install(FILES systemd/aktualizr-ubuntu.service - DESTINATION /lib/systemd/system + DESTINATION ${CMAKE_INSTALL_LIBDIR}/systemd/system RENAME aktualizr.service PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ COMPONENT aktualizr) - install(FILES sota-ubuntu.toml - DESTINATION lib/sota/conf.d + install(FILES sota-local.toml + DESTINATION ${CMAKE_INSTALL_LIBDIR}/sota/conf.d PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ COMPONENT aktualizr) install(FILES secondary/virtualsec.json - DESTINATION lib/sota/secondaries + DESTINATION ${CMAKE_INSTALL_LIBDIR}/sota/secondaries RENAME demo_secondary.json PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ COMPONENT aktualizr) endif(BUILD_DEB) -install(DIRECTORY DESTINATION lib/sota/conf.d 
COMPONENT aktualizr) +install(DIRECTORY DESTINATION ${CMAKE_INSTALL_LIBDIR}/sota/conf.d COMPONENT aktualizr) diff --git a/config/posix-secondary-config.json b/config/posix-secondary-config.json index 73db16b27c..26a6bc1564 100644 --- a/config/posix-secondary-config.json +++ b/config/posix-secondary-config.json @@ -2,6 +2,6 @@ "IP": { "secondaries_wait_port": 9040, "secondaries_wait_timeout": 120, - "secondaries": [{"addr": "127.0.0.1:9050"}] + "secondaries": [{"addr": "127.0.0.1:9050", "verification_type": "Full"}] } } diff --git a/config/posix-secondary.toml b/config/posix-secondary.toml index 008636b3cd..153a3d37db 100644 --- a/config/posix-secondary.toml +++ b/config/posix-secondary.toml @@ -2,7 +2,6 @@ ecu_hardware_id = "local-fake-secondary" [storage] -type = "sqlite" path = "storage" sqldb_path = "secondary.db" @@ -12,7 +11,7 @@ primary_ip = "127.0.0.1" primary_port = 9040 [pacman] -type = "fake" +type = "none" [logger] loglevel = 0 diff --git a/config/secondary/virtualsec.json b/config/secondary/virtualsec.json index 7ac2a3b9a9..a2d8b3f0e4 100644 --- a/config/secondary/virtualsec.json +++ b/config/secondary/virtualsec.json @@ -1,7 +1,7 @@ { "virtual": [ { - "partial_verifying": "false", + "partial_verifying": false, "ecu_hardware_id": "demo-virtual", "full_client_dir": "storage/demo-vsec1", "ecu_private_key": "sec.private", @@ -11,7 +11,7 @@ "metadata_path": "storage/demo-vsec1/metadata" }, { - "partial_verifying": "false", + "partial_verifying": false, "ecu_hardware_id": "demo-virtual", "full_client_dir": "storage/demo-vsec2", "ecu_private_key": "sec.private", diff --git a/config/sota-device-cred-hsm.toml b/config/sota-device-cred-hsm.toml index 1efc6170cb..ce556e0d46 100644 --- a/config/sota-device-cred-hsm.toml +++ b/config/sota-device-cred-hsm.toml @@ -13,9 +13,6 @@ tls_pkey_id = "02" [uptane] key_source = "pkcs11" -[storage] -type = "sqlite" - [import] base_path = "/var/sota/import" tls_cacert_path = "root.crt" diff --git a/config/sota-device-cred.toml 
b/config/sota-device-cred.toml index f527cf5699..82ff8dad67 100644 --- a/config/sota-device-cred.toml +++ b/config/sota-device-cred.toml @@ -1,9 +1,6 @@ [tls] server_url_path = "/var/sota/import/gateway.url" -[storage] -type = "sqlite" - [import] base_path = "/var/sota/import" tls_cacert_path = "root.crt" diff --git a/config/sota-local-with-secondaries.toml b/config/sota-local-with-secondaries.toml index c49756f238..60da3f72f4 100644 --- a/config/sota-local-with-secondaries.toml +++ b/config/sota-local-with-secondaries.toml @@ -3,7 +3,6 @@ provision_path = "credentials.zip" primary_ecu_hardware_id = "local-fake" [uptane] -polling_sec = 1 secondary_config_file = "posix-secondary-config.json" [logger] @@ -14,3 +13,4 @@ path = "storage" [pacman] type = "none" +images_path = "storage/images" diff --git a/config/sota-local.toml b/config/sota-local.toml index 4708d5b0cb..fdd34e1599 100644 --- a/config/sota-local.toml +++ b/config/sota-local.toml @@ -10,6 +10,4 @@ path = "storage" [pacman] type = "none" - -[uptane] -secondary_config_file = "virtualsec.json" +images_path = "storage/images" diff --git a/config/sota-secondary.toml b/config/sota-secondary.toml index f723537081..6c95a7b807 100644 --- a/config/sota-secondary.toml +++ b/config/sota-secondary.toml @@ -1,3 +1,2 @@ [storage] -type = "sqlite" sqldb_path = "/var/sota/secondary.db" diff --git a/config/sota-shared-cred.toml b/config/sota-shared-cred.toml index 7db1f3fd68..cadb1ad273 100644 --- a/config/sota-shared-cred.toml +++ b/config/sota-shared-cred.toml @@ -1,5 +1,2 @@ [provision] provision_path = "/var/sota/sota_provisioning_credentials.zip" - -[storage] -type = "sqlite" diff --git a/config/sota-ubuntu.toml b/config/sota-ubuntu.toml deleted file mode 100644 index 4a5f683a55..0000000000 --- a/config/sota-ubuntu.toml +++ /dev/null @@ -1,9 +0,0 @@ -[provision] -provision_path = "/var/sota/sota_provisioning_credentials.zip" -primary_ecu_hardware_id = "ubuntu" - -[storage] -type = "sqlite" - -[pacman] -type = "debian" 
diff --git a/config/sql/migration/migrate.00.sql b/config/sql/migration/migrate.00.sql index f574dd06e0..2723618cbc 100644 --- a/config/sql/migration/migrate.00.sql +++ b/config/sql/migration/migrate.00.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE version(version INTEGER); diff --git a/config/sql/migration/migrate.01.sql b/config/sql/migration/migrate.01.sql index d25f1acd6a..655e8805f5 100644 --- a/config/sql/migration/migrate.01.sql +++ b/config/sql/migration/migrate.01.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; ALTER TABLE primary_image ADD COLUMN installed_versions TEXT NOT NULL DEFAULT ''; diff --git a/config/sql/migration/migrate.02.sql b/config/sql/migration/migrate.02.sql index a0ebe830ca..7d8086c887 100644 --- a/config/sql/migration/migrate.02.sql +++ b/config/sql/migration/migrate.02.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; diff --git a/config/sql/migration/migrate.06.sql b/config/sql/migration/migrate.06.sql index 135b34d1e8..433ffc146d 100644 --- a/config/sql/migration/migrate.06.sql +++ b/config/sql/migration/migrate.06.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; DROP TABLE root_meta; diff --git a/config/sql/migration/migrate.07.sql b/config/sql/migration/migrate.07.sql index cba3be9c84..08b1d690e3 100644 --- a/config/sql/migration/migrate.07.sql +++ b/config/sql/migration/migrate.07.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE installation_result(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), id TEXT, result_code INTEGER NOT NULL DEFAULT 0, result_text TEXT); diff --git a/config/sql/migration/migrate.08.sql b/config/sql/migration/migrate.08.sql index 5f1c4f0278..2d28fb59f6 100644 --- a/config/sql/migration/migrate.08.sql +++ b/config/sql/migration/migrate.08.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE primary_keys_migrate(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), private TEXT, public TEXT); diff --git a/config/sql/migration/migrate.09.sql b/config/sql/migration/migrate.09.sql index b0327d6867..0281ba5ee5 100644 --- a/config/sql/migration/migrate.09.sql +++ b/config/sql/migration/migrate.09.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE installed_versions_migrate(hash TEXT, name TEXT NOT NULL, is_current INTEGER NOT NULL CHECK (is_current IN (0,1)) DEFAULT 0, length INTEGER NOT NULL DEFAULT 0, UNIQUE(hash, name)); diff --git a/config/sql/migration/migrate.10.sql b/config/sql/migration/migrate.10.sql index 38631117c7..d2d794b1c8 100644 --- a/config/sql/migration/migrate.10.sql +++ b/config/sql/migration/migrate.10.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; ALTER TABLE target_images ADD real_size INTEGER NOT NULL DEFAULT 0; diff --git a/config/sql/migration/migrate.11.sql b/config/sql/migration/migrate.11.sql index e73174dc36..437d8a1289 100644 --- a/config/sql/migration/migrate.11.sql +++ b/config/sql/migration/migrate.11.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; ALTER TABLE target_images ADD sha256 TEXT NOT NULL DEFAULT ""; diff --git a/config/sql/migration/migrate.12.sql b/config/sql/migration/migrate.12.sql index 024fa0b782..0ea04596b7 100644 --- a/config/sql/migration/migrate.12.sql +++ b/config/sql/migration/migrate.12.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE need_reboot(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), flag INTEGER NOT NULL DEFAULT 0); diff --git a/config/sql/migration/migrate.13.sql b/config/sql/migration/migrate.13.sql index 4cd502bfab..e4b4e259d0 100644 --- a/config/sql/migration/migrate.13.sql +++ b/config/sql/migration/migrate.13.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE installed_versions_migrate(ecu_serial TEXT NOT NULL, sha256 TEXT NOT NULL, name TEXT NOT NULL, hashes TEXT NOT NULL, length INTEGER NOT NULL DEFAULT 0, correlation_id TEXT NOT NULL DEFAULT '', is_current INTEGER NOT NULL CHECK (is_current IN (0,1)) DEFAULT 0, is_pending INTEGER NOT NULL CHECK (is_pending IN (0,1)) DEFAULT 0, UNIQUE(ecu_serial, sha256, name)); diff --git a/config/sql/migration/migrate.14.sql b/config/sql/migration/migrate.14.sql index c245e61f6c..3b494868de 100644 --- a/config/sql/migration/migrate.14.sql +++ b/config/sql/migration/migrate.14.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE target_images_data(filename TEXT PRIMARY KEY, image_data BLOB NOT NULL); diff --git a/config/sql/migration/migrate.15.sql b/config/sql/migration/migrate.15.sql index 1efc0d97b4..9daaa500bc 100644 --- a/config/sql/migration/migrate.15.sql +++ b/config/sql/migration/migrate.15.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE rollback_migrations(version_from INT PRIMARY KEY, migration TEXT NOT NULL); diff --git a/config/sql/migration/migrate.16.sql b/config/sql/migration/migrate.16.sql index 4b5b8f60ee..d11e3a0e6c 100644 --- a/config/sql/migration/migrate.16.sql +++ b/config/sql/migration/migrate.16.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; DROP TABLE installation_result; diff --git a/config/sql/migration/migrate.17.sql b/config/sql/migration/migrate.17.sql index edb76255b7..359a5f634a 100644 --- a/config/sql/migration/migrate.17.sql +++ b/config/sql/migration/migrate.17.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE delegations(meta BLOB NOT NULL, role_name TEXT NOT NULL, UNIQUE(role_name)); diff --git a/config/sql/migration/migrate.18.sql b/config/sql/migration/migrate.18.sql index 27b89fb10b..c611362181 100644 --- a/config/sql/migration/migrate.18.sql +++ b/config/sql/migration/migrate.18.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; DROP TABLE target_images_data; diff --git a/config/sql/migration/migrate.19.sql b/config/sql/migration/migrate.19.sql index 6f67bcb2d1..1d0830480a 100644 --- a/config/sql/migration/migrate.19.sql +++ b/config/sql/migration/migrate.19.sql @@ -1,4 +1,4 @@ --- Don't modify this! 
Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; CREATE TABLE ecu_serials_migrate(id INTEGER PRIMARY KEY, serial TEXT UNIQUE, hardware_id TEXT NOT NULL, is_primary INTEGER NOT NULL DEFAULT 0 CHECK (is_primary IN (0,1))); diff --git a/config/sql/migration/migrate.20.sql b/config/sql/migration/migrate.20.sql index 4b49610174..636c081867 100644 --- a/config/sql/migration/migrate.20.sql +++ b/config/sql/migration/migrate.20.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; ALTER TABLE installed_versions RENAME TO installed_versions_old; diff --git a/config/sql/migration/migrate.21.sql b/config/sql/migration/migrate.21.sql index 95a5a7511a..c48b0e71d2 100644 --- a/config/sql/migration/migrate.21.sql +++ b/config/sql/migration/migrate.21.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT MIGRATION; ALTER TABLE installed_versions ADD COLUMN custom_meta TEXT NOT NULL DEFAULT ""; diff --git a/config/sql/migration/migrate.22.sql b/config/sql/migration/migrate.22.sql new file mode 100644 index 0000000000..aa816abd96 --- /dev/null +++ b/config/sql/migration/migrate.22.sql @@ -0,0 +1,9 @@ +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT MIGRATION; + +CREATE TABLE ecu_report_counter(ecu_serial TEXT NOT NULL PRIMARY KEY, counter INTEGER NOT NULL DEFAULT 0); + +DELETE FROM version; +INSERT INTO version VALUES(22); + +RELEASE MIGRATION; diff --git a/config/sql/migration/migrate.23.sql b/config/sql/migration/migrate.23.sql new file mode 100644 index 0000000000..5518e5efe5 --- /dev/null +++ b/config/sql/migration/migrate.23.sql @@ -0,0 +1,10 @@ +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT MIGRATION; + +ALTER TABLE ecu_serials RENAME TO ecus; +CREATE TABLE secondary_ecus(serial TEXT PRIMARY KEY, sec_type TEXT, public_key_type TEXT, public_key TEXT, extra TEXT, manifest TEXT); + +DELETE FROM version; +INSERT INTO version VALUES(23); + +RELEASE MIGRATION; diff --git a/config/sql/migration/migrate.24.sql b/config/sql/migration/migrate.24.sql new file mode 100644 index 0000000000..8d424fff57 --- /dev/null +++ b/config/sql/migration/migrate.24.sql @@ -0,0 +1,9 @@ +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT MIGRATION; + +CREATE TABLE report_events(id INTEGER PRIMARY KEY, json_string TEXT NOT NULL); + +DELETE FROM version; +INSERT INTO version VALUES(24); + +RELEASE MIGRATION; diff --git a/config/sql/migration/migrate.25.sql b/config/sql/migration/migrate.25.sql new file mode 100644 index 0000000000..cdd03ee7bc --- /dev/null +++ b/config/sql/migration/migrate.25.sql @@ -0,0 +1,9 @@ +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT MIGRATION; + +CREATE TABLE device_data(data_type TEXT PRIMARY KEY, hash TEXT NOT NULL); + +DELETE FROM version; +INSERT INTO version VALUES(25); + +RELEASE MIGRATION; diff --git a/config/sql/rollback/rollback.15.sql b/config/sql/rollback/rollback.15.sql index d4e91c01e8..7d26ea7ab9 100644 --- a/config/sql/rollback/rollback.15.sql +++ b/config/sql/rollback/rollback.15.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; DROP TABLE rollback_migrations; diff --git a/config/sql/rollback/rollback.16.sql b/config/sql/rollback/rollback.16.sql index 77ffe2950b..da70342559 100644 --- a/config/sql/rollback/rollback.16.sql +++ b/config/sql/rollback/rollback.16.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; DROP TABLE device_installation_result; diff --git a/config/sql/rollback/rollback.17.sql b/config/sql/rollback/rollback.17.sql index 989b33c3f9..a72047ae16 100644 --- a/config/sql/rollback/rollback.17.sql +++ b/config/sql/rollback/rollback.17.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; DROP TABLE delegations; diff --git a/config/sql/rollback/rollback.18.sql b/config/sql/rollback/rollback.18.sql index ff4e7530fc..7b77c7661a 100644 --- a/config/sql/rollback/rollback.18.sql +++ b/config/sql/rollback/rollback.18.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; CREATE TABLE target_images_data(filename TEXT PRIMARY KEY, image_data BLOB NOT NULL); diff --git a/config/sql/rollback/rollback.19.sql b/config/sql/rollback/rollback.19.sql index fda03e8b52..b469a3b137 100644 --- a/config/sql/rollback/rollback.19.sql +++ b/config/sql/rollback/rollback.19.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; CREATE TABLE ecu_serials_migrate(serial TEXT UNIQUE, hardware_id TEXT NOT NULL, is_primary INTEGER NOT NULL CHECK (is_primary IN (0,1))); diff --git a/config/sql/rollback/rollback.20.sql b/config/sql/rollback/rollback.20.sql index 3a30813015..629ef65205 100644 --- a/config/sql/rollback/rollback.20.sql +++ b/config/sql/rollback/rollback.20.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; CREATE TABLE installed_versions_migrate(ecu_serial TEXT NOT NULL, sha256 TEXT NOT NULL, name TEXT NOT NULL, hashes TEXT NOT NULL, length INTEGER NOT NULL DEFAULT 0, correlation_id TEXT NOT NULL DEFAULT '', is_current INTEGER NOT NULL CHECK (is_current IN (0,1)) DEFAULT 0, is_pending INTEGER NOT NULL CHECK (is_pending IN (0,1)) DEFAULT 0, UNIQUE(ecu_serial, sha256, name)); diff --git a/config/sql/rollback/rollback.21.sql b/config/sql/rollback/rollback.21.sql index 01ca624a4f..44d8113928 100644 --- a/config/sql/rollback/rollback.21.sql +++ b/config/sql/rollback/rollback.21.sql @@ -1,4 +1,4 @@ --- Don't modify this! Create a new migration instead--see docs/schema-migrations.adoc +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc SAVEPOINT ROLLBACK_MIGRATION; CREATE TABLE installed_versions_migrate(id INTEGER PRIMARY KEY, ecu_serial TEXT NOT NULL, sha256 TEXT NOT NULL, name TEXT NOT NULL, hashes TEXT NOT NULL, length INTEGER NOT NULL DEFAULT 0, correlation_id TEXT NOT NULL DEFAULT '', is_current INTEGER NOT NULL CHECK (is_current IN (0,1)) DEFAULT 0, is_pending INTEGER NOT NULL CHECK (is_pending IN (0,1)) DEFAULT 0, was_installed INTEGER NOT NULL CHECK (was_installed IN (0,1)) DEFAULT 0); diff --git a/config/sql/rollback/rollback.22.sql b/config/sql/rollback/rollback.22.sql new file mode 100644 index 0000000000..f430a7d814 --- /dev/null +++ b/config/sql/rollback/rollback.22.sql @@ -0,0 +1,9 @@ +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT ROLLBACK_MIGRATION; + +DROP TABLE ecu_report_counter; + +DELETE FROM version; +INSERT INTO version VALUES(21); + +RELEASE ROLLBACK_MIGRATION; diff --git a/config/sql/rollback/rollback.23.sql b/config/sql/rollback/rollback.23.sql new file mode 100644 index 0000000000..117fc6e472 --- /dev/null +++ b/config/sql/rollback/rollback.23.sql @@ -0,0 +1,10 @@ +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT ROLLBACK_MIGRATION; + +DROP TABLE secondary_ecus; +ALTER TABLE ecus RENAME TO ecu_serials; + +DELETE FROM version; +INSERT INTO version VALUES(22); + +RELEASE ROLLBACK_MIGRATION; diff --git a/config/sql/rollback/rollback.24.sql b/config/sql/rollback/rollback.24.sql new file mode 100644 index 0000000000..b7abf49ff4 --- /dev/null +++ b/config/sql/rollback/rollback.24.sql @@ -0,0 +1,9 @@ +-- Don't modify this! Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT ROLLBACK_MIGRATION; + +DROP TABLE report_events; + +DELETE FROM version; +INSERT INTO version VALUES(23); + +RELEASE ROLLBACK_MIGRATION; diff --git a/config/sql/rollback/rollback.25.sql b/config/sql/rollback/rollback.25.sql new file mode 100644 index 0000000000..540f1a4185 --- /dev/null +++ b/config/sql/rollback/rollback.25.sql @@ -0,0 +1,9 @@ +-- Don't modify this! 
Create a new migration instead--see docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +SAVEPOINT ROLLBACK_MIGRATION; + +DROP TABLE device_data; + +DELETE FROM version; +INSERT INTO version VALUES(24); + +RELEASE ROLLBACK_MIGRATION; diff --git a/config/sql/schema.sql b/config/sql/schema.sql index 94e025ba8e..cf3cf98d77 100644 --- a/config/sql/schema.sql +++ b/config/sql/schema.sql @@ -1,7 +1,8 @@ CREATE TABLE version(version INTEGER); -INSERT INTO version(rowid,version) VALUES(1,21); +INSERT INTO version(rowid,version) VALUES(1,25); CREATE TABLE device_info(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), device_id TEXT, is_registered INTEGER NOT NULL DEFAULT 0 CHECK (is_registered IN (0,1))); -CREATE TABLE ecu_serials(id INTEGER PRIMARY KEY, serial TEXT UNIQUE, hardware_id TEXT NOT NULL, is_primary INTEGER NOT NULL DEFAULT 0 CHECK (is_primary IN (0,1))); +CREATE TABLE ecus(id INTEGER PRIMARY KEY, serial TEXT UNIQUE, hardware_id TEXT NOT NULL, is_primary INTEGER NOT NULL DEFAULT 0 CHECK (is_primary IN (0,1))); +CREATE TABLE secondary_ecus(serial TEXT PRIMARY KEY, sec_type TEXT, public_key_type TEXT, public_key TEXT, extra TEXT, manifest TEXT); CREATE TABLE misconfigured_ecus(serial TEXT UNIQUE, hardware_id TEXT NOT NULL, state INTEGER NOT NULL CHECK (state IN (0,1))); CREATE TABLE installed_versions(id INTEGER PRIMARY KEY, ecu_serial TEXT NOT NULL, sha256 TEXT NOT NULL, name TEXT NOT NULL, hashes TEXT NOT NULL, length INTEGER NOT NULL DEFAULT 0, correlation_id TEXT NOT NULL DEFAULT '', is_current INTEGER NOT NULL CHECK (is_current IN (0,1)) DEFAULT 0, is_pending INTEGER NOT NULL CHECK (is_pending IN (0,1)) DEFAULT 0, was_installed INTEGER NOT NULL CHECK (was_installed IN (0,1)) DEFAULT 0, custom_meta TEXT NOT NULL DEFAULT ""); CREATE TABLE primary_keys(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), private TEXT, public TEXT); @@ -23,3 +24,6 @@ CREATE TABLE ecu_installation_results(ecu_serial TEXT NOT NULL PRIMARY KEY, succ CREATE 
TABLE need_reboot(unique_mark INTEGER PRIMARY KEY CHECK (unique_mark = 0), flag INTEGER NOT NULL DEFAULT 0); CREATE TABLE rollback_migrations(version_from INT PRIMARY KEY, migration TEXT NOT NULL); CREATE TABLE delegations(meta BLOB NOT NULL, role_name TEXT NOT NULL, UNIQUE(role_name)); +CREATE TABLE ecu_report_counter(ecu_serial TEXT NOT NULL PRIMARY KEY, counter INTEGER NOT NULL DEFAULT 0); +CREATE TABLE report_events(id INTEGER PRIMARY KEY, json_string TEXT NOT NULL); +CREATE TABLE device_data(data_type TEXT PRIMARY KEY, hash TEXT NOT NULL); diff --git a/docker/Dockerfile.aktualizr b/docker/Dockerfile.aktualizr index a2396fbec0..d80b07758a 100644 --- a/docker/Dockerfile.aktualizr +++ b/docker/Dockerfile.aktualizr @@ -1,4 +1,5 @@ -FROM advancedtelematic/aktualizr-base as builder +ARG AKTUALIZR_BASE=advancedtelematic/aktualizr-base +FROM $AKTUALIZR_BASE LABEL Description="Aktualizr application dockerfile" ADD . /aktualizr @@ -6,3 +7,4 @@ WORKDIR /aktualizr/build RUN cmake -DFAULT_INJECTION=on -DBUILD_SOTA_TOOLS=on -DBUILD_DEB=on -DCMAKE_BUILD_TYPE=Debug .. 
RUN make -j8 install +RUN ldconfig diff --git a/docker/Dockerfile.ubuntu.bionic b/docker/Dockerfile.ubuntu.bionic index 71eff140ac..0c56731a08 100644 --- a/docker/Dockerfile.ubuntu.bionic +++ b/docker/Dockerfile.ubuntu.bionic @@ -13,7 +13,6 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco awscli \ bison \ ccache \ - clang-format-6.0 \ cmake \ curl \ e2fslibs-dev \ @@ -45,7 +44,6 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco libsodium-dev \ libsqlite3-dev \ libssl-dev \ - libsystemd-dev \ libtool \ lshw \ make \ @@ -61,9 +59,12 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco python3-venv \ softhsm2 \ sqlite3 \ + strace \ valgrind \ wget \ - zip + xsltproc \ + zip \ + unzip WORKDIR /ostree RUN git init && git remote add origin https://github.com/ostreedev/ostree @@ -73,6 +74,6 @@ RUN ./configure CFLAGS='-Wno-error=missing-prototypes' --with-libarchive --disab RUN make VERBOSE=1 -j4 RUN make install -RUN useradd testuser +RUN useradd -m testuser WORKDIR / diff --git a/docker/Dockerfile.debian.testing b/docker/Dockerfile.ubuntu.focal similarity index 58% rename from docker/Dockerfile.debian.testing rename to docker/Dockerfile.ubuntu.focal index 409a6fed0f..c7edb8e9e4 100644 --- a/docker/Dockerfile.debian.testing +++ b/docker/Dockerfile.ubuntu.focal @@ -1,5 +1,5 @@ -FROM debian:testing -LABEL Description="Aktualizr testing dockerfile for Debian Unstable + static checks" +FROM ubuntu:focal +LABEL Description="Aktualizr testing dockerfile for Ubuntu Focal with static checks" ENV DEBIAN_FRONTEND noninteractive @@ -12,10 +12,10 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco automake \ bison \ ccache \ - clang-6.0 \ - clang-tidy-6.0 \ - clang-tools-6.0 \ - clang-format-6.0 \ + clang-11 \ + clang-format-11 \ + clang-tidy-11 \ + clang-tools-11 \ cmake \ curl \ doxygen \ @@ -41,40 +41,37 @@ RUN apt-get update && apt-get -y install 
--no-install-suggests --no-install-reco libgpgme11-dev \ libgtest-dev \ liblzma-dev \ + libostree-dev \ libp11-dev \ libsodium-dev \ libsqlite3-dev \ libssl-dev \ - libsystemd-dev \ libtool \ lshw \ make \ - ninja-build \ net-tools \ + ninja-build \ opensc \ + ostree \ pkg-config \ psmisc \ + python-is-python3 \ python3-dev \ python3-gi \ python3-openssl \ + python3-pip \ python3-venv \ softhsm2 \ sqlite3 \ + strace \ valgrind \ wget \ + xsltproc \ zip -RUN ln -s clang-6.0 /usr/bin/clang && \ - ln -s clang++-6.0 /usr/bin/clang++ - -WORKDIR /ostree -RUN git init && git remote add origin https://github.com/ostreedev/ostree -RUN git fetch origin v2018.9 && git checkout FETCH_HEAD -RUN NOCONFIGURE=1 ./autogen.sh -RUN ./configure CFLAGS='-Wno-error=missing-prototypes' --with-libarchive --disable-gtk-doc --disable-gtk-doc-html --disable-gtk-doc-pdf --disable-man --with-builtin-grub2-mkconfig --with-curl --without-soup --prefix=/usr -RUN make VERBOSE=1 -j4 -RUN make install +RUN ln -s clang-11 /usr/bin/clang && \ + ln -s clang++-11 /usr/bin/clang++ -RUN useradd testuser +RUN useradd -m testuser WORKDIR / diff --git a/docker/Dockerfile.ubuntu.xenial b/docker/Dockerfile.ubuntu.xenial index 26c212b3ec..add4e6fae6 100644 --- a/docker/Dockerfile.ubuntu.xenial +++ b/docker/Dockerfile.ubuntu.xenial @@ -42,7 +42,6 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco libsodium-dev \ libsqlite3-dev \ libssl-dev \ - libsystemd-dev \ libtool \ lshw \ make \ @@ -57,6 +56,7 @@ RUN apt-get update && apt-get -y install --no-install-suggests --no-install-reco python3-pip \ python3-venv \ sqlite3 \ + strace \ wget \ zip diff --git a/docs/README.adoc b/docs/README.adoc index 56af4583e7..81a8688141 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -1,12 +1,12 @@ = OTA Connect Developer Guide -This directory contains the source of our **OTA Connect Developer Guide** which is published to our https://docs.ota.here.com[documentation portal]. 
+The `ota-client-guide` subdirectory contains the source of our **OTA Connect Developer Guide** which is published to our https://docs.ota.here.com[documentation portal]. You can also read the source files for this guide. To read this guide locally or in GitHub, start with the xref:ota-client-guide/modules/ROOT/nav.adoc[table of contents]. [NOTE] ==== -Content includes, such as code snippets or reused text snippets will not render correctly in Github. +Some elements, such as code snippets or reused text snippets, will not render correctly in GitHub. ==== == Reference documentation @@ -29,6 +29,19 @@ The link above is for the doxygen docs on master. Doxygen docs for the following * https://advancedtelematic.github.io/aktualizr/2019.6/index.html[2019.6] * https://advancedtelematic.github.io/aktualizr/2019.7/index.html[2019.7] * https://advancedtelematic.github.io/aktualizr/2019.8/index.html[2019.8] +* https://advancedtelematic.github.io/aktualizr/2019.9/index.html[2019.9] +* https://advancedtelematic.github.io/aktualizr/2019.10/index.html[2019.10] +* https://advancedtelematic.github.io/aktualizr/2019.11/index.html[2019.11] +* https://advancedtelematic.github.io/aktualizr/2020.1/index.html[2020.1] +* https://advancedtelematic.github.io/aktualizr/2020.2/index.html[2020.2] +* https://advancedtelematic.github.io/aktualizr/2020.3/index.html[2020.3] +* https://advancedtelematic.github.io/aktualizr/2020.4/index.html[2020.4] +* https://advancedtelematic.github.io/aktualizr/2020.5/index.html[2020.5] +* https://advancedtelematic.github.io/aktualizr/2020.6/index.html[2020.6] +* https://advancedtelematic.github.io/aktualizr/2020.7/index.html[2020.7] +* https://advancedtelematic.github.io/aktualizr/2020.8/index.html[2020.8] +* https://advancedtelematic.github.io/aktualizr/2020.9/index.html[2020.9] +* https://advancedtelematic.github.io/aktualizr/2020.10/index.html[2020.10] ==== == Release process diff --git a/docs/client-provisioning-methods.adoc 
b/docs/client-provisioning-methods.adoc deleted file mode 120000 index fd7beb80bb..0000000000 --- a/docs/client-provisioning-methods.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc \ No newline at end of file diff --git a/docs/configuration.adoc b/docs/configuration.adoc deleted file mode 120000 index 62a1467c83..0000000000 --- a/docs/configuration.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc \ No newline at end of file diff --git a/docs/deb-package-install.adoc b/docs/deb-package-install.adoc deleted file mode 120000 index d9907225b8..0000000000 --- a/docs/deb-package-install.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/deb-package-install.adoc \ No newline at end of file diff --git a/docs/debugging-tips.adoc b/docs/debugging-tips.adoc deleted file mode 120000 index ed50e9e132..0000000000 --- a/docs/debugging-tips.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/debugging-tips.adoc \ No newline at end of file diff --git a/docs/doxygen/CMakeLists.txt b/docs/doxygen/CMakeLists.txt index d11c214804..5e4bff52b6 100644 --- a/docs/doxygen/CMakeLists.txt +++ b/docs/doxygen/CMakeLists.txt @@ -6,10 +6,11 @@ add_custom_target(docs) if (DOXYGEN_FOUND AND DOXYGEN_DOT_FOUND) configure_file(Doxyfile.in Doxyfile @ONLY) add_custom_target(doxygen - COMMAND ${DOXYGEN_EXECUTABLE} Doxyfile - COMMENT "Generating Doxygen Documentation") + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + COMMENT "Generating Doxygen Documentation" + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) add_dependencies(docs doxygen) - install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/ DESTINATION share/doc/aktualizr OPTIONAL) + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html/ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/doc/aktualizr OPTIONAL) else(DOXYGEN_FOUND AND DOXYGEN_DOT_FOUND) message(WARNING "doxygen + dot not found, skipping") 
endif(DOXYGEN_FOUND AND DOXYGEN_DOT_FOUND) diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in index a5179f0af8..ef573acefc 100644 --- a/docs/doxygen/Doxyfile.in +++ b/docs/doxygen/Doxyfile.in @@ -1,4 +1,4 @@ -# Doxyfile 1.8.11 +# Doxyfile 1.8.16 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -17,11 +17,11 @@ # Project related configuration options #--------------------------------------------------------------------------- -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 @@ -58,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = +OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@ # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -236,7 +236,12 @@ TAB_SIZE = 2 # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. +# newlines (in the resulting output). 
You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) ALIASES = "verify{1}=\xrefitem verify \"Verifies requirement\" \"Requirement Verification\" \1" \ "satisfy{1}=\xrefitem satisfy \"Satisfies requirement\" \"Requirement Implementation\" \1" \ @@ -284,12 +289,13 @@ OPTIMIZE_OUTPUT_VHDL = NO # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat +# .inc files as Fortran files (default is PHP), and .f files as C (default is +# Fortran), use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. 
# @@ -300,7 +306,7 @@ EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. +# documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. @@ -312,7 +318,7 @@ MARKDOWN_SUPPORT = YES # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 0. +# Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 @@ -342,7 +348,7 @@ BUILTIN_STL_SUPPORT = YES CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. @@ -526,7 +532,7 @@ INTERNAL_DOCS = NO # names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. +# (including Cygwin) ands Mac users are advised to set this option to NO. # The default value is: system dependent. 
CASE_SENSE_NAMES = NO @@ -713,7 +719,7 @@ LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. @@ -758,7 +764,8 @@ WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. # The default value is: NO. 
WARN_NO_PARAMDOC = NO @@ -797,10 +804,10 @@ WARN_LOGFILE = INPUT = @CMAKE_SOURCE_DIR@/docs \ @CMAKE_SOURCE_DIR@/docs/doxygen \ - @CMAKE_SOURCE_DIR@/tests \ + @CMAKE_SOURCE_DIR@/include \ + @CMAKE_SOURCE_DIR@/include/libaktualizr \ @CMAKE_SOURCE_DIR@/src/aktualizr_get \ @CMAKE_SOURCE_DIR@/src/aktualizr_info \ - @CMAKE_SOURCE_DIR@/src/aktualizr_lite \ @CMAKE_SOURCE_DIR@/src/aktualizr_primary \ @CMAKE_SOURCE_DIR@/src/aktualizr_secondary \ @CMAKE_SOURCE_DIR@/src/cert_provider \ @@ -811,29 +818,28 @@ INPUT = @CMAKE_SOURCE_DIR@/docs \ @CMAKE_SOURCE_DIR@/src/libaktualizr-posix/asn1/messages \ @CMAKE_SOURCE_DIR@/src/libaktualizr/bootloader \ @CMAKE_SOURCE_DIR@/src/libaktualizr/bootstrap \ + @CMAKE_SOURCE_DIR@/src/libaktualizr/campaign \ @CMAKE_SOURCE_DIR@/src/libaktualizr/config \ @CMAKE_SOURCE_DIR@/src/libaktualizr/crypto \ @CMAKE_SOURCE_DIR@/src/libaktualizr/http \ - @CMAKE_SOURCE_DIR@/src/libaktualizr/isotp_conn \ @CMAKE_SOURCE_DIR@/src/libaktualizr/logging \ @CMAKE_SOURCE_DIR@/src/libaktualizr/package_manager \ @CMAKE_SOURCE_DIR@/src/libaktualizr/primary \ - @CMAKE_SOURCE_DIR@/src/libaktualizr/socket_activation \ @CMAKE_SOURCE_DIR@/src/libaktualizr/storage \ @CMAKE_SOURCE_DIR@/src/libaktualizr/telemetry \ @CMAKE_SOURCE_DIR@/src/libaktualizr/uptane \ @CMAKE_SOURCE_DIR@/src/libaktualizr/utilities \ - @CMAKE_SOURCE_DIR@/src/load_tests \ @CMAKE_SOURCE_DIR@/src/sota_tools \ @CMAKE_SOURCE_DIR@/src/uptane_generator \ @CMAKE_SOURCE_DIR@/src/virtual_secondary \ - @CMAKE_SOURCE_DIR@/CONTRIBUTING.md \ - @CMAKE_SOURCE_DIR@/CHANGELOG.md + @CMAKE_SOURCE_DIR@/tests \ + @CMAKE_SOURCE_DIR@/CHANGELOG.md \ + @CMAKE_SOURCE_DIR@/CONTRIBUTING.md # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. 
See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# documentation (see: https://www.gnu.org/software/libiconv/) for the list of # possible encodings. # The default value is: UTF-8. @@ -851,7 +857,7 @@ INPUT_ENCODING = UTF-8 # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.c \ *.cc \ @@ -864,11 +870,20 @@ FILE_PATTERNS = *.c \ *.ipp \ *.i++ \ *.inl \ + *.idl \ + *.ddl \ + *.odl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ *.inc \ *.m \ *.markdown \ @@ -876,12 +891,19 @@ FILE_PATTERNS = *.c \ *.mm \ *.dox \ *.py \ + *.pyw \ *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ *.f \ *.for \ *.tcl \ *.vhd \ - *.js + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. @@ -1034,7 +1056,7 @@ INLINE_SOURCES = NO STRIP_CODE_COMMENTS = NO # If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. +# entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO @@ -1066,12 +1088,12 @@ SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version +# (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. 
# # To use it do the following: # - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # @@ -1099,7 +1121,7 @@ VERBATIM_HEADERS = YES # rich C++ code for which doxygen's built-in parser lacks the necessary type # information. # Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse-libclang=ON option for CMake. +# generated with the -Duse_libclang=ON option for CMake. # The default value is: NO. CLANG_ASSISTED_PARSING = NO @@ -1230,7 +1252,7 @@ HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. @@ -1289,13 +1311,13 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# environment (see: https://developer.apple.com/xcode/), introduced with OSX +# 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. 
Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1334,7 +1356,7 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output @@ -1410,7 +1432,7 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1418,7 +1440,7 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. 
@@ -1427,7 +1449,7 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1435,7 +1457,7 @@ QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1443,7 +1465,7 @@ QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = @@ -1536,7 +1558,7 @@ EXT_LINKS_IN_WINDOW = NO FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # @@ -1548,7 +1570,7 @@ FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering +# https://www.mathjax.org) which uses client side Javascript for the rendering # instead of using pre-rendered bitmaps. 
Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1575,11 +1597,11 @@ MATHJAX_FORMAT = HTML-CSS # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/. # This tag requires that the tag USE_MATHJAX is set to YES. -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/ # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example @@ -1637,7 +1659,7 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). +# Xapian (see: https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1650,7 +1672,7 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). See the section "External Indexing and +# Xapian (see: https://xapian.org/). See the section "External Indexing and # Searching" for details. # This tag requires that the tag SEARCHENGINE is set to YES. 
@@ -1702,21 +1724,15 @@ LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. # -# Note that when enabling USE_PDFLATEX this option is only used for generating -# bitmaps for formulas in the HTML output, but not in the Makefile that is -# written to the output directory. -# The default file is: latex. +# Note that when not enabling USE_PDFLATEX the default is latex when enabling +# USE_PDFLATEX the default is pdflatex and when in the later case latex is +# chosen this is overwritten by pdflatex. For specific output languages the +# default can have been set differently, this depends on the implementation of +# the output language. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_CMD_NAME = latex -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate -# index for LaTeX. -# The default file is: makeindex. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -MAKEINDEX_CMD_NAME = makeindex - # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. @@ -1837,7 +1853,7 @@ LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See -# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. +# https://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1890,9 +1906,9 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO -# Load stylesheet definitions from file. Syntax is similar to doxygen's config -# file, i.e. a series of assignments. You only have to provide replacements, -# missing definitions are set to their default value. +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# configuration file, i.e. a series of assignments. 
You only have to provide +# replacements, missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. @@ -1901,8 +1917,8 @@ RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is -# similar to doxygen's config file. A template extensions file can be generated -# using doxygen -e rtf extensionFile. +# similar to doxygen's configuration file. A template extensions file can be +# generated using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_EXTENSIONS_FILE = @@ -2020,9 +2036,9 @@ DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an -# AutoGen Definitions (see http://autogen.sf.net) file that captures the -# structure of the code including all documentation. Note that this feature is -# still experimental and incomplete at the moment. +# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures +# the structure of the code including all documentation. Note that this feature +# is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO @@ -2189,12 +2205,6 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. 
- -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- @@ -2208,15 +2218,6 @@ PERL_PATH = /usr/bin/perl CLASS_DIAGRAMS = YES -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. diff --git a/docs/doxygen/INDEX.md b/docs/doxygen/INDEX.md index 51da9dc4dd..78267bc3a8 100644 --- a/docs/doxygen/INDEX.md +++ b/docs/doxygen/INDEX.md @@ -15,4 +15,4 @@ The client is responsible for the following tasks: The aktualizr client application is a thin wrapper around the client library "libaktualizr". You could regard this library as a kind of toolbox. You can use the parts in this library to build a software update solution that conforms to the Uptane standard. -For all controllers that run aktualizr or include libaktualizr, you’ll need to implement some form of key provisioning. The OTA Connect documentation explains in detail how to [select a provisioning method](https://docs.ota.here.com/ota-client/dev/client-provisioning-methods.html) that suits your use case. For more information on how you can use this library, also see the [reference docs](https://github.com/advancedtelematic/aktualizr/tree/master/docs). 
+For all controllers that run aktualizr or include libaktualizr, you’ll need to implement some form of key provisioning. The OTA Connect documentation explains in detail how to [select a provisioning method](https://docs.ota.here.com/ota-client/latest/client-provisioning-methods.html) that suits your use case. For more information on how you can use this library, also see the [reference docs](https://github.com/uptane/aktualizr/tree/master/docs). diff --git a/docs/ecu_events.adoc b/docs/ecu_events.adoc deleted file mode 120000 index 2dbc02d89d..0000000000 --- a/docs/ecu_events.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/ecu_events.adoc \ No newline at end of file diff --git a/docs/fault-injection.adoc b/docs/fault-injection.adoc deleted file mode 120000 index 802e0a0c42..0000000000 --- a/docs/fault-injection.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/fault-injection.adoc \ No newline at end of file diff --git a/docs/integrate-libaktualizr.adoc b/docs/integrate-libaktualizr.adoc deleted file mode 120000 index 504d0bda4d..0000000000 --- a/docs/integrate-libaktualizr.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/libaktualizr-why-use.adoc \ No newline at end of file diff --git a/docs/ota-client-guide/antora.yml b/docs/ota-client-guide/antora.yml index fec8d65e2d..d4a0468097 100644 --- a/docs/ota-client-guide/antora.yml +++ b/docs/ota-client-guide/antora.yml @@ -1,5 +1,6 @@ name: ota-client title: OTA Connect Developer Guide version: latest +display_version: 2020.10 (latest) nav: - modules/ROOT/nav.adoc diff --git a/docs/ota-client-guide/modules/ROOT/assets/attachments/aktualizr-monitoring-zabbix-template.xml b/docs/ota-client-guide/modules/ROOT/assets/attachments/aktualizr-monitoring-zabbix-template.xml new file mode 100644 index 0000000000..c983bb3ad2 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/assets/attachments/aktualizr-monitoring-zabbix-template.xml @@ -0,0 +1,309 @@ + + 
+ 4.0 + 2020-02-13T10:13:43Z + + + Templates/Applications + + + + + + + + aktualizr.rss.memory.usage.graph + 900 + 200 + 0.0000 + 100.0000 + 1 + 1 + 0 + 1 + 0 + 0.0000 + 0.0000 + 0 + 0 + 0 + 0 + + + 0 + 0 + 1A7C11 + 0 + 2 + 0 + + Aktualizr Client + proc.mem[aktualizr,,,,rss] + + + + + + aktualizr.threads.count.graph + 900 + 200 + 0.0000 + 100.0000 + 1 + 1 + 0 + 1 + 0 + 0.0000 + 0.0000 + 0 + 0 + 0 + 0 + + + 0 + 0 + 1A7C11 + 0 + 2 + 0 + + Aktualizr Client + aktualizr.threads.count + + + + + + vfs.dev.write.bytes.graph + 900 + 200 + 0.0000 + 100.0000 + 1 + 1 + 0 + 1 + 0 + 0.0000 + 0.0000 + 0 + 0 + 0 + 0 + + + 0 + 0 + 1A7C11 + 0 + 2 + 0 + + Aktualizr Client + vfs.dev.write[,sectors] + + + + + + diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/clion-debugger.png b/docs/ota-client-guide/modules/ROOT/assets/images/clion-debugger.png deleted file mode 100644 index 9ffc615c36..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/clion-debugger.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/dual-bank-system-update-flow.svg b/docs/ota-client-guide/modules/ROOT/assets/images/dual-bank-system-update-flow.svg deleted file mode 100644 index 8907db0455..0000000000 --- a/docs/ota-client-guide/modules/ROOT/assets/images/dual-bank-system-update-flow.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/ostree-update-flow.svg b/docs/ota-client-guide/modules/ROOT/assets/images/ostree-update-flow.svg deleted file mode 100644 index 36ea27d7ac..0000000000 --- a/docs/ota-client-guide/modules/ROOT/assets/images/ostree-update-flow.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-devices.png b/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-devices.png deleted file mode 100644 index d3ef5b66c2..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-devices.png and 
/dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-infra.png b/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-infra.png deleted file mode 100644 index 492c921669..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/prov-diff-infra.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/s1-prov.png b/docs/ota-client-guide/modules/ROOT/assets/images/s1-prov.png deleted file mode 100644 index 3cc8dd30a1..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/s1-prov.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/s5-install_device.png b/docs/ota-client-guide/modules/ROOT/assets/images/s5-install_device.png deleted file mode 100644 index 730bcd5fc5..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/s5-install_device.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/screenshot_provisioning_key_2.png b/docs/ota-client-guide/modules/ROOT/assets/images/screenshot_provisioning_key_2.png deleted file mode 100644 index ccfe96a229..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/screenshot_provisioning_key_2.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/assets/images/security-channels.png b/docs/ota-client-guide/modules/ROOT/assets/images/security-channels.png deleted file mode 100644 index 7f873242de..0000000000 Binary files a/docs/ota-client-guide/modules/ROOT/assets/images/security-channels.png and /dev/null differ diff --git a/docs/ota-client-guide/modules/ROOT/examples/sota-local.toml b/docs/ota-client-guide/modules/ROOT/examples/sota-local.toml index 4708d5b0cb..6134ddb63d 100644 --- a/docs/ota-client-guide/modules/ROOT/examples/sota-local.toml +++ b/docs/ota-client-guide/modules/ROOT/examples/sota-local.toml @@ -1,6 +1,8 @@ [provision] provision_path = "credentials.zip" primary_ecu_hardware_id = 
"local-fake" +# This setting is for testing purposes only. Do not use in a real device. +mode = "SharedCredReuse" [logger] loglevel = 1 @@ -10,6 +12,7 @@ path = "storage" [pacman] type = "none" +images_path = "storage/images" [uptane] secondary_config_file = "virtualsec.json" diff --git a/docs/ota-client-guide/modules/ROOT/examples/start.sh b/docs/ota-client-guide/modules/ROOT/examples/start.sh index 36c2e22398..b941e67792 100644 --- a/docs/ota-client-guide/modules/ROOT/examples/start.sh +++ b/docs/ota-client-guide/modules/ROOT/examples/start.sh @@ -4,7 +4,8 @@ set -euo pipefail readonly KUBECTL=${KUBECTL:-kubectl} -readonly CWD=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +CWD=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +readonly CWD readonly DNS_NAME=${DNS_NAME:-ota.local} export SERVER_NAME=${SERVER_NAME:-ota.ce} readonly SERVER_DIR=${SERVER_DIR:-${CWD}/../generated/${SERVER_NAME}} @@ -28,7 +29,7 @@ check_dependencies() { retry_command() { local name=${1} - local command=${@:2} + local command=${*:2} local n=0 local max=100 while true; do @@ -54,7 +55,7 @@ wait_for_pods() { print_hosts() { retry_command "ingress" "${KUBECTL} get ingress -o json \ | jq --exit-status '.items[0].status.loadBalancer.ingress'" - ${KUBECTL} get ingress --no-headers | awk -v ip=$(minikube ip) '{print ip " " $2}' + ${KUBECTL} get ingress --no-headers | awk -v ip="$(minikube ip)" '{print ip " " $2}' } kill_pid() { @@ -71,7 +72,7 @@ skip_ingress() { config/resources.yaml \ config/secrets.yaml \ $local_yaml | grep ^create_ingress | tail -n1) - echo $value | grep "false" + echo "$value" | grep "false" } make_template() { @@ -86,7 +87,7 @@ make_template() { --values config/images.yaml \ --values config/resources.yaml \ --values config/secrets.yaml \ - ${extra} \ + "${extra}" \ --output "${output}" } @@ -125,7 +126,7 @@ new_client() { ${KUBECTL} proxy --port "${PROXY_PORT}" & local pid=$! 
- trap "kill_pid ${pid}" EXIT + trap 'kill_pid ${pid}' EXIT sleep 3s local api="http://localhost:${PROXY_PORT}/api/v1/namespaces/${NAMESPACE}/services" @@ -141,9 +142,9 @@ new_client() { local port=${DEVICE_PORT:-2222} local options="-o StrictHostKeyChecking=no" - ssh ${options} "root@${addr}" -p "${port}" "echo \"${gateway} ota.ce\" >> /etc/hosts" - scp -P "${port}" ${options} "${device_dir}/client.pem" "root@${addr}:/var/sota/client.pem" - scp -P "${port}" ${options} "${device_dir}/pkey.pem" "root@${addr}:/var/sota/pkey.pem" + ssh "${options}" "root@${addr}" -p "${port}" "echo \"${gateway} ota.ce\" >> /etc/hosts" + scp -P "${port}" "${options}" "${device_dir}/client.pem" "root@${addr}:/var/sota/client.pem" + scp -P "${port}" "${options}" "${device_dir}/pkey.pem" "root@${addr}:/var/sota/pkey.pem" } new_server() { @@ -234,7 +235,7 @@ start_vaults() { pod=$(wait_for_pods "${vault}") ${KUBECTL} port-forward "${pod}" "${PROXY_PORT}:${PROXY_PORT}" & local pid=$! - trap "kill_pid ${pid}" EXIT + trap 'kill_pid ${pid}' EXIT sleep 3s init_vault "${vault}" @@ -248,7 +249,8 @@ start_vaults() { start_weave() { [[ ${SKIP_WEAVE} == true ]] && return 0; - local version=$(${KUBECTL} version | base64 | tr -d '\n') + local version + version=$(${KUBECTL} version | base64 | tr -d '\n') ${KUBECTL} apply -f "https://cloud.weave.works/k8s/net?k8s-version=${version}" } @@ -273,7 +275,7 @@ get_credentials() { ${KUBECTL} proxy --port "${PROXY_PORT}" & local pid=$! 
- trap "kill_pid ${pid}" EXIT + trap 'kill_pid ${pid}' EXIT sleep 3s local namespace="x-ats-namespace:default" @@ -296,13 +298,13 @@ get_credentials() { retry_command "keys" "http --ignore-stdin --check-status GET ${keyserver}/api/v1/root/${id}" keys=$(http --ignore-stdin --check-status GET "${keyserver}/api/v1/root/${id}/keys/targets/pairs") - echo ${keys} | jq '.[0] | {keytype, keyval: {public: .keyval.public}}' > "${SERVER_DIR}/targets.pub" - echo ${keys} | jq '.[0] | {keytype, keyval: {private: .keyval.private}}' > "${SERVER_DIR}/targets.sec" + echo "${keys}" | jq '.[0] | {keytype, keyval: {public: .keyval.public}}' > "${SERVER_DIR}/targets.pub" + echo "${keys}" | jq '.[0] | {keytype, keyval: {private: .keyval.private}}' > "${SERVER_DIR}/targets.sec" retry_command "root.json" "http --ignore-stdin --check-status -d GET \ ${reposerver}/api/v1/user_repo/root.json \"${namespace}\"" && \ http --ignore-stdin --check-status -d -o "${SERVER_DIR}/root.json" GET \ - ${reposerver}/api/v1/user_repo/root.json "${namespace}" + "${reposerver}"/api/v1/user_repo/root.json "${namespace}" echo "http://tuf-reposerver.${DNS_NAME}" > "${SERVER_DIR}/tufrepo.url" echo "https://${SERVER_NAME}:30443" > "${SERVER_DIR}/autoprov.url" @@ -315,7 +317,7 @@ get_credentials() { } END - zip --quiet --junk-paths ${SERVER_DIR}/{credentials.zip,autoprov.url,server_ca.pem,tufrepo.url,targets.pub,targets.sec,treehub.json,root.json} + zip --quiet --junk-paths "${SERVER_DIR}"/{credentials.zip,autoprov.url,server_ca.pem,tufrepo.url,targets.pub,targets.sec,treehub.json,root.json} kill_pid "${pid}" ${KUBECTL} create secret generic "user-keys" --from-literal="id=${id}" --from-literal="keys=${keys}" @@ -323,7 +325,7 @@ END [ $# -lt 1 ] && { echo "Usage: $0 []"; exit 1; } -command=$(echo "${1}" | sed 's/-/_/g') +command="${1//-/_}" case "${command}" in "start_all") diff --git a/docs/ota-client-guide/modules/ROOT/examples/virtualsec.json b/docs/ota-client-guide/modules/ROOT/examples/virtualsec.json index 
98a5871a7b..a2d8b3f0e4 100644 --- a/docs/ota-client-guide/modules/ROOT/examples/virtualsec.json +++ b/docs/ota-client-guide/modules/ROOT/examples/virtualsec.json @@ -1,7 +1,7 @@ { "virtual": [ { - "partial_verifying": "false", + "partial_verifying": false, "ecu_hardware_id": "demo-virtual", "full_client_dir": "storage/demo-vsec1", "ecu_private_key": "sec.private", @@ -11,7 +11,7 @@ "metadata_path": "storage/demo-vsec1/metadata" }, { - "partial_verifying": "false", + "partial_verifying": false, "ecu_hardware_id": "demo-virtual", "full_client_dir": "storage/demo-vsec2", "ecu_private_key": "sec.private", @@ -22,4 +22,4 @@ } ] -} \ No newline at end of file +} diff --git a/docs/ota-client-guide/modules/ROOT/nav.adoc b/docs/ota-client-guide/modules/ROOT/nav.adoc index 59173e2863..e3aef5915e 100644 --- a/docs/ota-client-guide/modules/ROOT/nav.adoc +++ b/docs/ota-client-guide/modules/ROOT/nav.adoc @@ -1,4 +1,4 @@ -// MC: NOTE ABOUT TOC +// MC: NOTE ABOUT TOC // Adding "pageroot" attr so that TOC that will also work directly in GitHub. Because... // In Antora the "pages" subdir is implcit added to the xref path at build time. // if you add "/pages" Antora will intepret it as "pages/pages". 
@@ -9,24 +9,26 @@ ifndef::env-github[:pageroot:] .Introduction to the Developer Tools * xref:{pageroot}index.adoc[Introduction] -* xref:{pageroot}developer-tools.adoc[Developer Tools] +* xref:{pageroot}developer-tools.adoc[Developer tools] * xref:{pageroot}workflow-overview.adoc[Basic OTA update workflow] -* xref:{pageroot}evaluation-to-prod.adoc[Moving from Evaluation to Production] +* xref:{pageroot}evaluation-to-prod.adoc[Moving from evaluation to production] // NEW topics .Key Concepts // NEW/updated topics -* xref:{pageroot}software-management.adoc[Software Management] +* xref:{pageroot}software-management.adoc[Software management] ** xref:{pageroot}supporting-technologies.adoc[Supporting technologies] ** xref:{pageroot}yocto.adoc[Yocto] // --- -** xref:{pageroot}ostree-and-treehub.adoc[OSTree, and TreeHub] +** xref:{pageroot}ostree-and-treehub.adoc[OSTree and TreeHub] +** xref:{pageroot}comparing-full-filesystem-update-strategies.adoc[Comparing full-filesystem update strategies] // --- * xref:{pageroot}security.adoc[Security] -** xref:{pageroot}pki.adoc[Key Management] +** xref:{pageroot}pki.adoc[Key management] +** xref:{pageroot}client-provisioning-methods.adoc[Device provisioning] ** xref:{pageroot}uptane.adoc[The Uptane security specification] // future iteration: * xref:{pageroot}prod-intro[Testing and production environments] -* xref:{pageroot}client-provisioning-methods.adoc[Device provisioning] + .Evaluate OTA Connect * xref:{pageroot}intro-evaluate.adoc[Evaluating OTA Connect] @@ -34,39 +36,49 @@ ifndef::env-github[:pageroot:] * xref:{pageroot}build-images.adoc[Use our sample recipes to build disk images] ** xref:{pageroot}build-raspberry.adoc[Build for a Raspberry Pi] ** xref:{pageroot}build-qemu.adoc[Build for QEMU] -// OTA-3629: Uncomment when AGL ready:** xref:{pageroot}build-agl.adoc[Build for Automotive Grade Linux] +** xref:{pageroot}build-agl.adoc[Build for Automotive Grade Linux] * xref:{pageroot}simulate-device-basic.adoc[Simulate a device 
without building a disk image] -* xref:{pageroot}pushing-updates.adoc[Upload a sample software version] +* xref:{pageroot}pushing-updates.adoc[Add software to your Yocto image] * xref:{pageroot}update-single-device.adoc[Update a second device with the sample software] -.Build your own OTA-enabled solution -* xref:{pageroot}intro-prep.adoc[Recommended Steps] +.Integrate OTA Connect +* xref:{pageroot}intro-prep.adoc[Recommended steps] * xref:{pageroot}recommended-clientconfig.adoc[Recommended configurations] -* xref:{pageroot}account-setup.adoc[Set up multiple accounts] +* xref:{pageroot}add-environments.adoc[Set up additional environments] * xref:{pageroot}libaktualizr-why-use.adoc[Integrate libaktualizr into your solution] -** xref:{pageroot}libaktualizr-getstarted.adoc[Get Started with libaktualizr] +** xref:{pageroot}libaktualizr-getstarted.adoc[Get started with libaktualizr] ** xref:{pageroot}libaktualizr-update-secondary.adoc[Updating a Secondary ECU with libaktualizr] * xref:{pageroot}build-ota-enabled-images.adoc[Build and deploy OTA-enabled disk images] ** xref:{pageroot}supported-boards.adoc[Supported boards] +** xref:{pageroot}yocto-release-branches.adoc[Yocto release branches] ** xref:{pageroot}add-ota-functonality-existing-yocto-project.adoc[Add OTA functionality to a Yocto project] ** xref:{pageroot}libaktualizr-integrate.adoc[Add libaktualizr integration to a Yocto project] -** xref:{pageroot}yocto-release-branches.adoc[Yocto release branches] + +* xref:{pageroot}bsp-integration.adoc[Add support for new hardware] +** xref:{pageroot}add-board-class.adoc[Adding a board class] +** xref:{pageroot}setup-boot-image-for-ostree.adoc[Set up boot image layout for OSTree compatibility] +** xref:{pageroot}add-meta-updater-to-vendors-sdk.adoc[Add meta-updater features to the vendor's SDK] +** xref:{pageroot}troubleshooting-bsp-integration.adoc[Troubleshooting BSP Integration] * xref:{pageroot}device-cred-prov-steps.adoc[Provision devices] -** 
xref:{pageroot}generate-selfsigned-root.adoc[Generate a self-signed root certificate] -** xref:{pageroot}provide-root-cert.adoc[Register your root certificate] +** xref:{pageroot}generate-selfsigned-root.adoc[Generate a fleet root certificate] +** xref:{pageroot}provide-root-cert.adoc[Register your fleet root certificate] ** xref:{pageroot}generate-devicecert.adoc[Generate device certificates] +** xref:{pageroot}hsm-provisioning-example.adoc[Generate a device certificate using an HSM] ** xref:{pageroot}enable-device-cred-provisioning.adoc[Enable and install device certificates] * xref:{pageroot}secure-software-updates.adoc[Secure your software repository] -** xref:{pageroot}install-garage-sign-deploy.adoc[Install the Garage Deploy tool] -** xref:{pageroot}rotating-signing-keys.adoc[Manage keys for software metadata] +** xref:{pageroot}install-garage-sign-deploy.adoc[Install the garage-deploy tool] +** xref:{pageroot}keep-local-repo-on-external-storage.adoc[Keep your repository on external storage] +** xref:{pageroot}rotating-signing-keys.adoc[Rotate keys for Root and Targets metadata] +** xref:{pageroot}finding-unsigned-metadata.adoc[Find the unsigned Root and Targets metadata] +** xref:{pageroot}change-signature-thresholds.adoc[Change signature thresholds] ** xref:{pageroot}metadata-expiry.adoc[Manage metadata expiry dates] .Deploy your OTA-enabled solution -* xref:{pageroot}deploy-checklist.adoc[Deploying to Production] +* xref:{pageroot}deploy-checklist.adoc[Deploying to production] .How to * xref:{pageroot}cross-deploy-images.adoc[Transfer software to another repository] @@ -76,16 +88,23 @@ ifndef::env-github[:pageroot:] * xref:{pageroot}build-only-ostree.adoc[Build only the OSTree part] * xref:{pageroot}rollback.adoc[Set up rollback behavior] * xref:{pageroot}deb-package-install.adoc[Install the client from a deb package] +* xref:{pageroot}upload-large-binary.adoc[Upload a binary file] +* xref:{pageroot}remove-sw-version.adoc[Remove a software version] +* 
xref:{pageroot}push-images-with-bitbake.adoc[Upload Yocto images using offline credentials] +* xref:{pageroot}virtual-secondaries.adoc[Use Virtual Secondaries] .Reference // MC: Do in second iteration: * xref:{pageroot}otaconnect-identifiers.adoc[Identifiers] -* xref:{pageroot}aktualizr-config-options.adoc[Client Configuration Options] -* xref:{pageroot}build-configuration.adoc[Build Configuration Options] -* xref:{pageroot}aktualizr-runningmodes-finegrained-commandline-control.adoc[Client Commands] +* xref:{pageroot}aktualizr-config-options.adoc[Client configuration options] +* xref:{pageroot}build-configuration.adoc[Build configuration options] +* xref:{pageroot}garage-sign-reference.adoc[Garage-sign commands and options] +* xref:{pageroot}customise-targets-metadata.adoc[Customize Targets metadata] +* xref:{pageroot}aktualizr-runningmodes-finegrained-commandline-control.adoc[Client commands] * xref:{pageroot}provisioning-methods-and-credentialszip.adoc[Contents of the credentials file] * xref:{pageroot}useful-bitbake-commands.adoc[Bitbake commands] * xref:{pageroot}ostree-usage.adoc[OSTree commands] -* xref:{pageroot}ecu_events.adoc[ECU events] +// xref:{pageroot}ecu_events.adoc[ECU events] +* xref:{pageroot}meta-updater-usage.adoc[Advanced usage of meta-updater] .Test and simulate OTA functions * xref:{pageroot}simulate-device-cred-provtest.adoc[Simulate device credentials] @@ -93,13 +112,15 @@ ifndef::env-github[:pageroot:] ** xref:{pageroot}posix-secondaries.adoc[Configuration and emulation on a local host] * xref:{pageroot}fault-injection.adoc[Simulate installation failures for testing] * xref:{pageroot}uptane-generator.adoc[Simulate Uptane metadata transactions] +* xref:{pageroot}device-monitoring-with-zabbix.adoc[Monitor aktualizr resource usage with Zabbix] .Troubleshooting * xref:{pageroot}troubleshooting.adoc[Troubleshooting] +* xref:{pageroot}reporting-problems.adoc[Reporting problems] .For Contributors // Dev-authored topics -* 
xref:{pageroot}release-process.adoc[Packaging an aktualizr Release on github] +* xref:{pageroot}release-process.adoc[Packaging an aktualizr release on github] * xref:{pageroot}schema-migrations.adoc[Add a schema migration] -* xref:{pageroot}debugging-tips.adoc[Debugging the Client] - +* xref:{pageroot}debugging-tips.adoc[Debugging the client] +* xref:{pageroot}meta-updater-testing.adoc[Testing meta-updater] diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/aktualizr-version.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/aktualizr-version.adoc new file mode 100644 index 0000000000..bf4ad787fc --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/_partials/aktualizr-version.adoc @@ -0,0 +1,10 @@ +// This file exists only to keep track of the current aktualizr version. +// In the client docs, we want to reference the version appropriate to +// the version being viewed, but when we are referencing aktualizr from +// the other, non-versioned docs, we want to make sure we're using the +// latest version. +:aktualizr-version: 2020.10 + +:yocto-version: 3.1 + +:yocto-branch: dunfell diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/build-ota-enabled-images.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/build-ota-enabled-images.adoc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/config-descriptions.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/config-descriptions.adoc index fabfa78c6b..b8da6b60e6 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/_partials/config-descriptions.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/_partials/config-descriptions.adoc @@ -1,9 +1,9 @@ // tag::buildconfig-hint[] -If you are building a disk image that contains aktualizr, your configuration will be the **local.conf** file. +If you build a software image that contains aktualizr, your configuration is the **local.conf** file. 
-* You'll find this file in the `conf` subdirectory of the of your build project. +You can find this file in the `conf` subdirectory of your build project. -For more information, see the xref:build-configuration.adoc[build configuration reference] and the xref:build-images.adoc[sample build procedures] +For more information, see the xref:build-configuration.adoc[build configuration reference] and the xref:build-images.adoc[sample build procedures]. // end::buildconfig-hint[] @@ -15,7 +15,7 @@ You configure aktualizr by creating and updating a `*.toml` file in one of the f * `/usr/lib/sota/conf.d` * `/etc/sota/conf.d/` -For more information and links to sample configuration files, see the xref:aktualizr-config-options.adoc[client configuration reference]. +For more information and links to sample configuration files, see the xref:ota-client::aktualizr-config-options.adoc[client configuration reference]. // end::clientconfig-hint[] @@ -27,14 +27,14 @@ The default polling internal designed to make it convenient for you test and dev // end::pollconfig-dev[] // tag::autorebootconfig-dev[] -Forces installation completion. Causes a system reboot in case of an ostree package manager. Emulates a reboot in case of a fake package manager. +Forces installation completion. Causes a system reboot when using the OSTree package manager. Emulates a reboot when using the fake package manager. -You'll want to enable this option when developing because it's more convenient. +You may want to enable this option for convenience during development. // end::autorebootconfig-dev[] // tag::pollconfig-prod[] -When moving to production you'll want to have a much longer interval. +When moving to production you'll want to have a much longer interval. In fact, for production, we don't support intervals less the 1 hour (3,600 seconds). Longer internals help you to reduce the internet bandwidth and power consumption for your devices. 
We recommend an internal between 1 and 7 days (86,400 to 604,800 seconds) @@ -50,13 +50,13 @@ If you followed our recommendation to enable automatic rebooting for development // tag::metadata-expires[] Use this option to have the metadata expire after a fixed date and time. -Specify the time as a UTC instant like in the following example: +Specify the time as a UTC instant. For example: // end::metadata-expires[] // tag::metadata-expireafter[] Use this option to have the metadata expire after an elapsed period of time. -Specify the number of years, months and days like in the following example: +Specify the number of years, months, and days. For example: -// end::metadata-expireafter[] \ No newline at end of file +// end::metadata-expireafter[] diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/how-prov-with-device-cred.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/how-prov-with-device-cred.adoc index 683638e220..e96beccf50 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/_partials/how-prov-with-device-cred.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/_partials/how-prov-with-device-cred.adoc @@ -1,5 +1,5 @@ -When you provision with device credentials, you install permanent credentials each device *before* the device connects to the OTA Connect server. +When you provision with device credentials, you install permanent credentials each device *before* the device connects to the OTA Connect server. -You would use the xref:pki.adoc[private key for your fleet] to sign the device certificates *and* your fleet root certificate. You then register your fleet root certificate on the OTA Connect server. +You would use the xref:ota-client::pki.adoc[private key for your fleet] to sign the device certificates *and* your fleet root certificate. You then register your fleet root certificate on the OTA Connect server. 
-Every time a device connects to the OTA Connect server, the server verifies that the device credentials are signed by a trusted source. It does this by comparing public key in the device certificate with the public key in your fleet root certificate. If they are both signed by the same private key, they should match and the device can be trusted. For a more detailed description of how device-credential provisioning works, see the xref:client-provisioning-methods.adoc[provisioning methods overview]. \ No newline at end of file +Every time a device connects to the OTA Connect server, the server verifies that the device credentials are signed by a trusted source. It does this by comparing public key in the device certificate with the public key in your fleet root certificate. If they are both signed by the same private key, they should match and the device can be trusted. For a more detailed description of how device-credential provisioning works, see the xref:ota-client::client-provisioning-methods.adoc[provisioning methods overview]. diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/recommended-steps.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/recommended-steps.adoc index 097f24d3b5..39c82ba61f 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/_partials/recommended-steps.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/_partials/recommended-steps.adoc @@ -2,32 +2,47 @@ * *Use shared credential provisioning for your devices* + With shared-credential provisioning, you don't have to worry about installing certificates on your devices. -The OTA Connect server automatically does this for you. All you need to do is download a provisioning key, unique to your account, and all your devices can share it. +The OTA Connect server automatically does this for you. All you need to do is download a provisioning key that is unique to your account, and all your devices can share it. 
-* *Build disk images using our demo recipes* +** xref:ota-client::generating-provisioning-credentials.adoc[Get a provisioning key] + -We support a couple of demo boards "out of the box". You don't need to worry about complex build configurations at this stage. Just follow one of our xref:getstarted::index.adoc[Get Started guides] to learn how to build an OTA-enabled disk image. +{nbsp} + * *Use the standalone aktualizr client to test the OTA functionality* + You don't need to do anything extra to use the standalone aktualizr client. It's actually part of our demo build configurations, so the aktualizr client is included in the disk image that you'll build. + If you prefer to simulate an OTA-enabled device without building a disk image, you can install the aktualizr client on your development computer. In this case, however, you won't be able to try out OSTree functionality--the client will simply download and verify binaries, dropping them into a configurable location on your filesystem. +** xref:ota-client::simulate-device-basic.adoc[Simulate a device without building a disk image] ++ +{nbsp} + +* *Build disk images using our demo recipes* ++ +We support a couple of demo boards "out of the box". You don't need to worry about complex build configurations at this stage. Just follow one of our quick start guides to learn how to build an OTA-enabled disk image. + +** xref:ota-client::build-raspberry.adoc[Build a Raspberry Pi image] +** xref:ota-client::build-qemu.adoc[Build a QEMU/VirtualBox image] + // end::evaluate-steps[] // tag::integrate-steps[] -* *Set up different user logins* +* *Set up different environments* ++ +When you first create an account on the https://connect.ota.here.com[OTA Connect Portal], you get a home xref:ota-web::environments-intro.adoc[environment]--where you can create and manage software update projects. You can also create up to 10 additional environments within your OTA Connect account. 
This helps because you don't want to mix up test software and production software. + -In OTA Connect, each user account gets its own personal software repositories and device inventory. However, you don't want to mix up test software and production software by putting all of your builds and devices on the same account. +In a proper production workflow, you'll need separate environments to manage the different stages: + -A better strategy for production is to create separate user logins to manage the different stages. For example, you might want a three-step process: +. A developer environment. +. A QA environment. +. A production environment. + -. A developer user such as "dev@acme.com". -. A QA user such as "qa@acme.com". -. A production user such as "prod@acme.com". +In your OTA Connect account, you can easily switch between the different environments and also add your colleagues as members in each of the environments. For instructions on how to manage members, see the xref:ota-web::manage-members.adoc[related] section in the User Guide. + -These logins provide you with a way of clearly separating your development, QA and production resources. +Seperate environments provide you with a convenient way of separating your development, QA, and production resources. + //// COMMENTING OUT UNTIL ORGANIZATIONS STOPS BEING "ALPHA" @@ -54,7 +69,7 @@ In the initial integration phase, if you do not have the TPM/HSM ready to use, y * *Use offline keys to sign software metadata* + -Strong, automotive-grade security using Uptane is one of the most important parts of OTA Connect. Uptane achieves its security benefits by using multiple different, interdependent roles and repositories to sign software, establish trust, and ensure other important security properties. For a truly secure Uptane deployment, the keys that are used to sign metadata should be kept offline--for example, on an external hardware security module like a https://www.yubico.com/[YubiKey]. 
To save you from the difficulties and complications of managing offline keys, new OTA Connect accounts start off by using online signing keys kept in secure storagefootnote::[Using HashiCorp's https://www.vaultproject.io/[Vault], an industry-standard best practice.]. When you upload software and disk images to OTA Connect, the server generates and signs the associated metadata for you. +Strong, automotive-grade security using Uptane is one of the most important parts of OTA Connect. Uptane achieves its security benefits by using multiple different, interdependent roles and repositories to sign software, establish trust, and ensure other important security properties. For a truly secure Uptane deployment, the keys that are used to sign metadata should be kept offline--for example, on an external hardware security module like a https://www.yubico.com/[YubiKey]. To save you from the difficulties and complications of managing offline keys, new OTA Connect accounts start off by using online signing keys kept in secure storage using HashiCorp's https://www.vaultproject.io/[Vault], an industry-standard best practice. When you upload software and disk images to OTA Connect, the server generates and signs the associated metadata for you. + During the integration phase, you should take the signing keys offline and rotate them. This puts you in complete control over your fleet's security--even if an attacker somehow compromised the entire OTA Connect infrastructure, they still wouldn't be able to install their software on your vehicles. 
+ @@ -96,10 +111,19 @@ Once you understand how the libaktualizr demo application works, you'll want to * Hook into an HMI system to ask for user confirmation before installing an update * Check for safety conditions in the vehicle before starting an update * Only download large updates when the vehicle is connected to WiFi, or at off-peak times -* Pass an update for a secondary ECU off to a custom UDS flashing tool +* Pass an update for a Secondary ECU off to a custom UDS flashing tool * *Build and cross-deploy your disk images to your production account* + After you've finished integration and development, you'll need to move disk images from one account to another. For example, you might want to send a development build that you’re happy with to the QA team, or send that build to the deployment team once it’s passed QA. // end::deploy-steps[] + +// tag::firstbuild-nextstep[] +[TIP] +==== +Once you've built your first image, try adding some new software and xref:ota-client::pushing-updates.adoc[push your update to the OTA Connect server]. +==== +//MC: full versioned xref because snippet is displayed in two components. + +// end::firstbuild-nextstep[] diff --git a/docs/ota-client-guide/modules/ROOT/pages/_partials/why-different-prov-methods.adoc b/docs/ota-client-guide/modules/ROOT/pages/_partials/why-different-prov-methods.adoc index 005e7c1d8b..721e78b7f8 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/_partials/why-different-prov-methods.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/_partials/why-different-prov-methods.adoc @@ -1,6 +1,5 @@ -If you followed the xref:getstarted::get-started.adoc[Get Started Guide], you used a provisioning key that was shared by all devices. In this scenario, the OTA Connect server generates the device credentials for you. This method is fine if you're just evaluating OTA Connect and want to get started quickly. 
If you want to do some serious testing and eventually move to production, you'll need a switch to a more secure provisioning method. +If you followed the xref:getstarted::get-started.adoc[Get Started Guide], you used a provisioning key that was shared by all devices. In this scenario, the OTA Connect server generates the device credentials for you. This method is fine if you're just evaluating OTA Connect and want to get started quickly. If you want to do some serious testing and eventually move to production, you'll probably want to switch to a more secure provisioning method. -In this case, you shouldn't use the OTA Connect server to generate your device credentials. If you generate *and* validate credentials with the same server, you're taking a big risk. Generation and validation should always be done by separate entities. -Otherwise, if an attacker were able to infiltrate the OTA Connect server, they would be able to provision their own devices +Instead of having OTA Connect generate device certificates for you, you can use your own infrastructure to generate and sign device credentials. We call this method "provisioning with device credentials". -Instead, you should use your own infrastructure to generate device credentials outside of OTA Connect. We call this method "provisioning with device credentials". \ No newline at end of file +TIP: For a more detailed conceptual overview of the difference between the two types of provisioning, read our xref:client-provisioning-methods.adoc[guide to device provisioning]. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/account-setup.adoc b/docs/ota-client-guide/modules/ROOT/pages/account-setup.adoc deleted file mode 100644 index 7a16771d16..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/account-setup.adoc +++ /dev/null @@ -1,19 +0,0 @@ -= Set up multiple accounts -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -In OTA Connect, all devices and software belong to one *user* login. However, you don't want to mix up test software and production software by creating them all under the same user. - -In a proper production workflow, you'll need separate user logins to manage the different stages: - -. A developer user such as "dev@acme.com". -. A QA user such as ""qa@acme.com"". -. A production user such as "prod@acme.com"". - -These logins provide you with a convenient way of clearly separating your development, QA and production resources. \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/add-board-class.adoc b/docs/ota-client-guide/modules/ROOT/pages/add-board-class.adoc new file mode 100644 index 0000000000..e204b4e2dc --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/add-board-class.adoc @@ -0,0 +1,45 @@ += Add a board class for new target board in meta-updater +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +Adding a board class for a new target board in meta-updater involves two main steps: + +. 
Add a new `sota_\{MACHINE}.bbclass` file for the board + ++ +Using an NXP board as an example, we created the bbclass `classes/sota_ls1043ardb.bbclass` and added the following to the class file: ++ +[source,bash] +---- +KERNEL_IMAGETYPES = "fitImage" <1> + +OSTREE_KERNEL = "fitImage-${INITRAMFS_IMAGE}-${MACHINE}-${MACHINE}" +OSTREE_KERNEL_ARGS = "console=ttyS0,115200 ramdisk_size=8192 root=/dev/ram0 rw rootfstype=ext4 ostree_root=/dev/mmcblk0p2" + +WKS_FILE_sota = "ls1043ardb-ota.wks" <2> +IMAGE_BOOT_FILES = "ls1043ardb_boot.scr" <3> +---- ++ +<1> For most boards, especially boards that use a device tree, we recommend you use a https://elinux.org/images/f/f4/Elc2013_Fernandes.pdf[FIT (flattened image tree)] image if possible. A FIT image includes all the components of an initial boot image--like the device tree, initramfs, and kernel--in a bundle. +<2> Wic kickstart files tell Wic how to build and lay out an image that can be physically flashed onto the device initially. We generally need to modify the image layout for OTA; we’ll discuss how to write one in the following section. +<3> This line adds the basic boot script to the files available to bitbake when constructing the physical image types. See xref:add-meta-updater-to-vendors-sdk.adoc#_create_u_boot_script_for_ostree_initialization[Create U-Boot script for OSTree initialization] for the script itself. ++ +. Add that class to the top-level `sota.bbclass` file. ++ +Add a line to the file `classes/sota.bbclass` that points to the new class file as in the example below: ++ +[source,bash] +---- +SOTA_MACHINE_ls1043ardb ?= "ls1043ardb" +---- + +Examples of these class files can be found in the https://github.com/advancedtelematic/meta-updater/tree/master/classes[meta-updater-layer]. + +Once you have added a board class for your board in the meta-updater layer, you can xref:setup-boot-image-for-ostree.adoc[set up a boot image layout for OSTree compatibility]. 
+ + diff --git a/docs/ota-client-guide/modules/ROOT/pages/add-environments.adoc b/docs/ota-client-guide/modules/ROOT/pages/add-environments.adoc new file mode 100644 index 0000000000..167c1784aa --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/add-environments.adoc @@ -0,0 +1,34 @@ += Set up additional environments +:page-aliases: account-setup.adoc +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + + +An xref:ota-web::environments-intro.adoc[environment] is your working space in your https://connect.ota.here.com[OTA Connect account], where you can create and manage software update projects. + +If you do not want to mix up test and production software during xref:evaluation-to-prod.adoc#_integrate_ota_connect[integration with OTA Connect], you may create additional environments. You can xref:ota-web::create-environment.adoc[create] up to 10 additional environments in one OTA Connect account. + +For example, you may need to have different environments for development, QA, and production. + +IMPORTANT: The limit of 10 additional environments applies even if you leave the environments that you have created. There is currently no way to delete environments. + +== Environments for production workflow + +To manage the different stages of production workflow, you should create the following additional environments: + +* A developer environment +* A QA environment +* A production environment + +These environments provide you with a convenient way to separate your development, QA, and production resources. + + +After you create an environment, you can xref:ota-web::manage-members.adoc[add] your colleagues to work together on device provisioning, device groups, software versions, software updates, and campaigns. 
+ +To get more information on the *Environments* feature, see the xref:ota-web::environments-intro.adoc[Environments] section in the OTA Connect User Guide. + diff --git a/docs/ota-client-guide/modules/ROOT/pages/add-meta-updater-to-vendors-sdk.adoc b/docs/ota-client-guide/modules/ROOT/pages/add-meta-updater-to-vendors-sdk.adoc new file mode 100644 index 0000000000..24dd825cb0 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/add-meta-updater-to-vendors-sdk.adoc @@ -0,0 +1,88 @@ += Add meta-updater features to the vendor's SDK +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +include::partial$aktualizr-version.adoc[] + +To avoid conflicts and certain classes of bugs, we require the use of `usrmerge`, even on systems that do not use systemd. In the https://www.yoctoproject.org/docs/{yocto-version}/mega-manual/mega-manual.html[Yocto] ecosystem, it is a general best practice to write `do_install()` functions that install to the variable `\{bindir}`, but some recipes still hard-code the location of `/bin`. + +If a board vendor’s BSP has recipes like this, they will need to be patched so that the files install to the correct locations. In older Yocto versions, the only way to do this was to create a new BSP-specific layer with the patches, and only include that BSP-specific layer when the corresponding BSP was in use. (Otherwise, the recipes would fail to parse.) + +Now, however, the dynamic-layers feature allows us to conditionally patch recipes from BSP layers if and only if they are present. This is the approach we took to support the NXP board for our example. + +Several recipes needed to be patched in our example. 
An example patch of the https://git.yoctoproject.org/cgit/cgit.cgi/meta-virtualization/tree/recipes-containers/cgroup-lite/cgroup-lite_1.15.bb[cgroup-lite recipe] can be seen here; it can be included in the meta-updater as the file `dynamic-layers/virtualization-layer/recipes-containers/cgroup-lite/cgroup-lite_%.bbappend`: + +[source,bash] +---- +do_install() { + install -d ${D}/${bindir} + install -m 0755 ${S}/scripts/cgroups-mount ${D}/${bindir} + install -m 0755 ${S}/scripts/cgroups-umount ${D}/${bindir} + install -d ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/cgroups-init ${D}${sysconfdir}/init.d/cgroups-init + install -d ${D}${systemd_unitdir}/system + ln -sf /dev/null ${D}${systemd_unitdir}/system/cgroups-init.service +} +---- + +NOTE: Even though only some of the install statements needed to be modified, the `bbappend` file still needs to include the complete `do_install()` function. + + +== Add OTA Connect/meta-updater FS types + +Meta-updater provides several FS types for secure OTA delivery which need to be added to the FS types for the image class being built. + +For the LS1043ARDB, we targeted the `fsl-image-networking-full.bb` image file; the line below specifies the image types to be built. +[source,bash] +---- +IMAGE_FSTYPES_qoriq = "tar.gz ext2.gz.u-boot ext2.gz" +---- + +This image builds `tar.gz`, `ext2.gz.u-boot`, and `ext2.gz` by default. + +You should add the following FS types `ostreepush`, `garagesign`, `garagecheck`, `ota-ext4`, `ostree.tar.bz2`, `ota.tar.xz`, and `wic` with the line below. +[source,bash] +---- +IMAGE_FSTYPES_qoriq_append = " ostreepush garagesign garagecheck ota-ext4 ostree.tar.bz2 ota.tar.xz wic" +---- + +These are defined in the meta-updater layer; simply adding them to an existing definition should usually suffice.
+ + +== Create U-Boot script for OSTree initialization + +As described in the xref:bsp-integration.adoc#_key_concepts[key concepts section of BSP integration], OSTree needs the bootloader to load a minimal script that points it to the "real" script that OSTree generates to deploy the image. Exactly what this script looks like will vary from board to board, but the basic principle is to direct U-Boot to load the "real" script and the kernel image from the boot partition. + +For the LS1043ARDB, this is how we implemented the script. Note that, because it was very simple, we elected to simply embed the text of the file in the `deploy_append` of the qoriq image; this could also be done via including files, but it would be a bit more roundabout that way. This `bbappend` just cats the 4 lines of the script into a file in bitbake’s deployment directory, and then places it in the image. + +[source,bash] +---- +DEPENDS += "u-boot-mkimage-native" + +do_deploy_append_qoriq() { + cat > ${DEPLOYDIR}/ls1043ardb_boot.txt << EOF +load mmc 0:2 \${load_addr} /boot/loader/uEnv.txt <1> +env import -t \${fileaddr} \${filesize} +load mmc 0:2 \${load_addr} /boot\${kernel_image} +bootm \${load_addr} +EOF + + mkimage -A arm64 -O linux -T script -d ${DEPLOYDIR}/ls1043ardb_boot.txt ${DEPLOYDIR}/ls1043ardb_boot.scr <2> +} +---- + +<1> This is the crucial line pointing the bootloader to the OSTree-managed script. The OSTree-managed script sets the values of `$\{fileaddr}`, `$\{filesize}`, and `$\{kernel_image}`. +<2> Note that `ls1043ardb_boot.scr` is the name of the file specified to be included as `IMAGE_BOOT_FILES` in the board-specific bbclass described in the xref:add-board-class.adoc[first step]. + + +In some cases, it can be necessary or desirable to make the initial script a bit more complex. For example, implementing a boot watchdog for automated rollback needs some extra logic. 
An example of a more complex script can be seen in the https://github.com/advancedtelematic/meta-updater-raspberrypi/blob/master/recipes-bsp/u-boot-otascript/u-boot-otascript/uEnv.txt[meta-updater-raspberrypi] repo. + + +See also: + +* link:https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge/[The Case for the /usr Merge] diff --git a/docs/ota-client-guide/modules/ROOT/pages/add-ota-functonality-existing-yocto-project.adoc b/docs/ota-client-guide/modules/ROOT/pages/add-ota-functonality-existing-yocto-project.adoc index 88609e890b..4247bdfb3b 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/add-ota-functonality-existing-yocto-project.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/add-ota-functonality-existing-yocto-project.adoc @@ -7,20 +7,27 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +include::partial$aktualizr-version.adoc[] + :page-layout: page :page-categories: [quickstarts] :page-date: 2017-05-23 16:27:58 :page-order: 6 :icons: font -If you already have a Yocto-based project that you want to update using {product-name}, you just need to do four things to get started: +If you already have a Yocto-based project, you can start your functional integration with {product-name} by following these four steps: -1. Clone the https://github.com/advancedtelematic/meta-updater[meta-updater] layer and add it to your https://www.yoctoproject.org/docs/2.6/ref-manual/ref-manual.html#structure-build-conf-bblayers.conf[bblayers.conf]. -2. Clone a BSP integration layer (`meta-updater-$\{PLATFORM}`, e.g. https://github.com/advancedtelematic/meta-updater-raspberrypi[meta-updater-raspberrypi]) and add it to your conf/bblayers.conf. If your board isn't supported yet, you could write a BSP integration for it yourself. See the <> section for the details. -3. Set up your https://www.yoctoproject.org/docs/2.6/ref-manual/ref-manual.html#var-DISTRO[distro]. 
If you are using "poky", the default distro in Yocto, you can change it in your conf/local.conf to "poky-sota". Alternatively, if you are using your own or a third-party distro configuration, you can add `INHERIT += " sota"` to it, thus combining the capabilities of your distro with meta-updater features. -4. {app-url}/#/profile/access-keys[Create a provisioning key, window="_blank"] and add it to your local.conf. +1. Clone the https://github.com/advancedtelematic/meta-updater[meta-updater] layer and add it to your https://www.yoctoproject.org/docs/{yocto-version}/ref-manual/ref-manual.html#structure-build-conf-bblayers.conf[bblayers.conf]. +2. Clone a BSP integration layer (`meta-updater-$\{PLATFORM}`, e.g. https://github.com/advancedtelematic/meta-updater-raspberrypi[meta-updater-raspberrypi]) and add it to your `conf/bblayers.conf`. If your board isn't supported yet, you could write a BSP integration for it yourself. See xref:supported-boards.adoc#_adding_support_for_your_board[Adding support for your board] for more details. +3. Set up your https://www.yoctoproject.org/docs/{yocto-version}/ref-manual/ref-manual.html#var-DISTRO[distro]. If you are using "poky", the default distro in Yocto, you can change it in your `conf/local.conf` to `poky-sota` or to `poky-sota-systemd`. Alternatively, if you are using your own or a third-party distro configuration, you can add the following parameters to it, thus combining the capabilities of your distro with meta-updater features. ++ +---- +INHERIT += " sota" +DISTRO_FEATURES_append = " sota systemd usrmerge" +---- +4. {app-url}/#/profile/access-keys[Create a provisioning key, window="_blank"] and add it to your `local.conf`. -You can then build your image as usual, with bitbake. After building the root file system, bitbake will then create an https://ostree.readthedocs.io/en/latest/manual/adapting-existing/[OSTree-enabled version] of it, commit it to your local OSTree repo, and push it to OTA Connect. 
Additionally, a live disk image will be created (normally named `$\{IMAGE_NAME}.wic` e.g. `core-image-minimal-raspberrypi3.wic`). +You can then build your image as usual, with bitbake. After building the root file system, bitbake will then create an https://ostreedev.github.io/ostree/adapting-existing/[OSTree-enabled version] of it, commit it to your local OSTree repo, and push it to OTA Connect. Additionally, a live disk image will be created (normally named `$\{IMAGE_NAME}.wic` e.g. `core-image-minimal-raspberrypi3.wic`). See also: diff --git a/docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc b/docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc index 9eb8aa43f9..a06f501388 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/aktualizr-config-options.adoc @@ -7,6 +7,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +include::partial$aktualizr-version.adoc[] + :page-layout: page :page-categories: [client-config] :page-date: 2018-07-05 11:14:01 @@ -21,9 +23,9 @@ Here, we provide reference documentation on aktualizr's usage and configuration. == How .toml files are processed -Aktualizr is configured via `.toml` config files. One or more files or directories can be passed to the application via the `--config` flag (one per file or directory). +Aktualizr is configured via `.toml` config files. One or more files or directories can be passed to the application via the `--config` flag (one per file or directory). 
-* If `--config` is not specified on the command line, aktualizr searches the following directories for files with a `.toml` extension: +* If `--config` is not specified on the command line, aktualizr searches the following directories for files with a `.toml` extension: ** `/usr/lib/sota/conf.d` ** `/etc/sota/conf.d/` @@ -31,14 +33,14 @@ Aktualizr searches for and processes these config files in systemd style using t * If multiple files are found with the same name, the last detected file overrules and hides the others. * Files are then processed in alphabetical order with the following conditions: -** If a config option is specified in multiple files, the last entry **overrules** the previous entries. +** If a config option is specified in multiple files, the last entry **overrules** the previous entries. ** But if a config option is specified in the first file but *unspecified* in the last file, the last entry **does not** overrule the previous entry. For examples of configuration files, see the following resources: * link:{aktualizr-github-url}/config/[Config files used by unit tests] * link:{aktualizr-github-url}/tests/config/[Config files used by continuous integration tests] -* link:https://github.com/advancedtelematic/meta-updater/tree/thud/recipes-sota/config/files[Configuration fragments used in meta-updater recipes]. +* link:https://github.com/advancedtelematic/meta-updater/tree/{yocto-branch}/recipes-sota/config/files[Configuration fragments used in meta-updater recipes]. All fields are optional, and most have reasonable defaults that should be used unless you have a particular need to do otherwise. @@ -96,15 +98,16 @@ Options for how the device is provisioned with the backend. [options="header"] |========================================================================================== -| Name | Default | Description -| `server` | | Server provisioning URL. If empty, set to `tls.server`. -| `p12_password` | | Password for PKCS#12 encryption. 
-| `expiry_days` | `"36000"` | Provided in the `ttl` field of the device provisioning request sent to the server. -| `provision_path` | | Path to an archive containing provisioning data. See the xref:provisioning-methods-and-credentialszip.adoc[reference documentation] for the specification of the contents of this file. -| `device_id` | | Device ID of the primary ECU. If left empty, a random name will be generated. -| `primary_ecu_serial` | | Serial number of the primary ECU. If left empty, a random serial will be generated. -| `primary_ecu_hardware_id` | | The hardware ID of the primary ECU (e.g., `"raspberry-pi"`). If left empty, the hostname of the device will be used. -| `ecu_registration_endpoint` | | Ecu provisioning URL. If empty, set to `uptane.director_server` with `/ecus` appended. +| Name | Default | Description +| `server` | | Server provisioning URL. If empty, set to `tls.server`. +| `p12_password` | | Password for PKCS#12 encryption. +| `expiry_days` | `"36000"` | Provided in the `ttl` field of the device provisioning request sent to the server. +| `provision_path` | | Path to an archive containing provisioning data. See the xref:provisioning-methods-and-credentialszip.adoc[reference documentation] for the specification of the contents of this file. +| `device_id` | | Device ID of the Primary ECU. If left empty, a random name will be generated. +| `primary_ecu_serial` | | Serial number of the Primary ECU. If left empty, a random serial will be generated. +| `primary_ecu_hardware_id` | | The hardware ID of the Primary ECU (e.g., `"raspberry-pi"`). If left empty, the hostname of the device will be used. +| `ecu_registration_endpoint` | | ECU registration URL. If empty, set to `uptane.director_server` with `/ecus` appended. +| `mode` | `"SharedCred"` | See the xref:client-provisioning-methods.html[provisioning documentation] for more details. Options: `"DeviceCred"`, `"SharedCred"`, `"SharedCredReuse"`. The last is intended solely for testing purposes. 
|========================================================================================== If you intend to provision with a server by using https://github.com/advancedtelematic/meta-updater[meta-updater], you will probably want to set `provision.provision_path = "/var/sota/sota_provisioning_credentials.zip"`. @@ -115,14 +118,15 @@ Options for Uptane. [options="header"] |========================================================================================== -| Name | Default | Description -| `polling_sec` | `10` | Interval between polls (in seconds). -| `director_server` | | Director server URL. If empty, set to `tls.server` with `/director` appended. -| `repo_server` | | Image repository server URL. If empty, set to `tls.server` with `/repo` appended. -| `key_source` | `"file"` | Where to read the device's private key from. Options: `"file"`, `"pkcs11"`. -| `key_type` | `"RSA2048"` | Type of cryptographic keys to use. Options: `"ED25519"`, `"RSA2048"`, `"RSA3072"` or `"RSA4096"`. -| `secondary_configs_dir` | `""` | Directory containing individual secondary json configuration files. Example here: link:{aktualizr-github-url}/config/secondary/virtualsec.json[] -| `force_install_completion`| false | Forces installation completion. Causes a system reboot in case of an ostree package manager. Emulates a reboot in case of a fake package manager. +| Name | Default | Description +| `polling_sec` | `10` | Interval between polls (in seconds). +| `director_server` | | Director server URL. If empty, set to `tls.server` with `/director` appended. +| `repo_server` | | Image repository server URL. If empty, set to `tls.server` with `/repo` appended. +| `key_source` | `"file"` | Where to read the device's private key from. Options: `"file"`, `"pkcs11"`. +| `key_type` | `"RSA2048"` | Type of cryptographic keys to use. Options: `"ED25519"`, `"RSA2048"`, `"RSA3072"` or `"RSA4096"`. +| `force_install_completion` | false | Forces installation completion. 
Causes a system reboot when using the OSTree package manager. Emulates a reboot when using the fake package manager. +| `secondary_config_file` | `""` | Secondary json configuration file. Example here: link:{aktualizr-github-url}/config/secondary/virtualsec.json[] +| `secondary_preinstall_wait_sec` | `600` | Time to wait for reachable secondaries before attempting an installation. |========================================================================================== === `pacman` @@ -132,23 +136,27 @@ Options for package management and update installation. Note that this only coin [options="header"] |========================================================================================== | Name | Default | Description -| `type` | `"ostree"` | Which package manager to use. Options: `"ostree"`, `"debian"`, `"none"`. +| `type` | `"ostree"` | Which package manager to use. Options: `"ostree"`, `"none"`. | `os` | | OSTree operating system group. Only used with `ostree`. | `sysroot` | | Path to an OSTree sysroot. Only used with `ostree`. | `ostree_server` | | OSTree server URL. Only used with `ostree`. If empty, set to `tls.server` with `/treehub` appended. | `packages_file` | `"/usr/package.manifest"` | Path to a file for storing package manifest information. Only used with `ostree`. +| `images_path` | `"/var/sota/images"` | Directory to store downloaded binary Targets. Only used with `none`. | `fake_need_reboot` | false | Simulate a wait-for-reboot with the `"none"` package manager. Used for testing. |========================================================================================== === `storage` -Options for how Aktualizr stores data locally. +Options for how aktualizr stores data locally. [options="header"] |========================================================================================== | Name | Default | Description | `type` | `"sqlite"` | What type of storage driver to use. Options: `"sqlite"`. 
The former `"filesystem"` option is now disabled, existing devices will be migrated (see note below) -| `path` | `"/var/sota"` | Directory for storage +| `path` | `"/var/sota"` | Directory for storage. + +This should be a directory dedicated to aktualizr data. Aktualizr will attempt to set permissions on this directory, so this option should not be set to anything that is used for another purpose. In particular, do not set it to `/` or to your home directory, as this may render your system unusable. + | `sqldb_path` | `"sql.db"` | Relative path to the database file. | `uptane_metadata_path` | `"metadata"` | Path to the uptane metadata store, for migration from `filesystem`. | `uptane_private_key_path` | `"ecukey.der"` | Relative path to the Uptane specific private key, for migration from `filesystem`. @@ -161,7 +169,7 @@ Options for how Aktualizr stores data locally. The only supported storage option is now `sqlite`. Old systems configured with `filesystem` can be migrated by changing the `type` field to `sqlite` and keeping all the other fields as-is. -At the next Aktualizr run, the migration procedure will then run automatically and move existing data inside the database. +At the next aktualizr run, the migration procedure will then run automatically and move existing data inside the database. === `import` @@ -195,8 +203,9 @@ Options for configuring boot-specific behavior [options="header"] |========================================================================================== | Name | Default | Description -| `rollback_mode` | `"none"` | Controls rollback on supported platforms, see link:{aktualizr-github-url}/docs/rollback.adoc[]. Options: `"none"`, `"uboot_generic"`, `"uboot_masked"` +| `rollback_mode` | `"none"` | Controls rollback on supported platforms, see xref:rollback.adoc[]. Options: `"none"`, `"uboot_generic"`, `"uboot_masked"` | `reboot_sentinel_dir` | `"/var/run/aktualizr-session"` | Base directory for reboot detection sentinel. 
Must reside in a temporary file system. | `reboot_sentinel_name` | `"need_reboot"` | Name of the reboot detection sentinel. +| `reboot_command` | `"/sbin/reboot"` | Command to reboot the system after update completes. Applicable only if `uptane::force_install_completion` is set to `true`. |========================================================================================== diff --git a/docs/ota-client-guide/modules/ROOT/pages/bsp-integration.adoc b/docs/ota-client-guide/modules/ROOT/pages/bsp-integration.adoc new file mode 100644 index 0000000000..03eaf7a041 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/bsp-integration.adoc @@ -0,0 +1,48 @@ += BSP Integration +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + + +Adding support for a new board isn't too hard, if you're already familiar with Yocto and you have good documentation about your board's hardware. In this guide, we'll present conceptual information on what steps need to be taken, and then illustrate the concrete steps needed by taking an NXP board (the https://www.nxp.com/design/qoriq-developer-resources/qoriq-ls1043a-development-board:LS1043A-RDB[LS1043A-RDB]) as an example. + +All of the changes we're making in this guide will be done in https://github.com/advancedtelematic/meta-updater/[meta-updater]. While you're working on adding support for your new board, you should fork meta-updater. Once the support is ready, please feel free to make a pull request to get it merged into mainline meta-updater. + +NOTE: You can also add support for your board by writing a separate layer--see https://github.com/advancedtelematic/meta-updater-raspberrypi[meta-updater-raspberrypi] for an example of this approach. 
+ + + +== Key Concepts + +There are two important concepts about *OSTree* and *meta-updater* that you need to know before proceeding with the integration. + +. *OSTree* ++ +OSTree image deployments are installed inside the `/ostree` directory, and then hard-linked into place in `/boot` at boot time using a bootloader configuration script generated by OSTree when the image is verified. ++ +On an OSTree-managed system, OSTree can tell the bootloader which kernel, initramfs, and device tree blob to load. There are two basic ways to do this: ++ +* On a system with link:https://source.denx.de/u-boot/u-boot/raw/master/doc/develop/distro.rst[Distro Boot], OSTree sets the U-Boot environment variables to the appropriate values when OSTree switches to a new deployment. +* Systems that do not support Distro Boot (for example, GRUB) have a very minimal, simple bootloader configuration script that is fixed in the bootloader partition. This script points the bootloader to a second bootloader script, which is managed by OSTree. OSTree switches to a new deployment by replacing the second script with a new one which points to the new deployment. ++ +NOTE: OSTree will only switch to the new deployment once it has verified that the complete filesystem tree is present; libaktualizr will only instruct OSTree to switch once it has performed xref:uptane.adoc[Uptane] verification on the metadata directing it to switch. +. *meta-updater* ++ +The https://github.com/advancedtelematic/meta-updater/[meta-updater] layer contains recipes for the applications and libraries required by libaktualizr and OSTree, along with patches to upstream recipes when modifications are necessary. It also includes board and image classes for the OTA-specific needs of the boards it supports. + + +== Steps in adding a new board + +These are the required steps you need to integrate an unsupported board with OTA Connect. + +. xref:add-board-class.adoc[Add a board class for the new target board in meta-updater] +. 
xref:setup-boot-image-for-ostree.adoc[Set up a boot image layout for OSTree compatibility] +. xref:add-meta-updater-to-vendors-sdk.adoc[Add meta-updater features to the vendor's SDK] + + + + diff --git a/docs/ota-client-guide/modules/ROOT/pages/build-agl.adoc b/docs/ota-client-guide/modules/ROOT/pages/build-agl.adoc index 1bc1d6c0ef..2485e8e52d 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/build-agl.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/build-agl.adoc @@ -1,4 +1,5 @@ = Build an Automotive Grade Linux image +:page-partial: ifdef::env-github[] [NOTE] @@ -7,7 +8,9 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: +include::ota-client::partial$aktualizr-version.adoc[] + + :page-layout: page :page-categories: [quickstarts] :page-date: 2017-05-16 15:54:29 @@ -21,23 +24,21 @@ endif::[] // Most of the content here is the same as the Raspberry Pi instructions, so we re-use the chunks we can. -include::build-raspberry.adoc[tags=prereqs] +include::ota-client::page$build-raspberry.adoc[tags=prereqs] == Create your AGL Yocto build environment -=== Get AGL master manifest +=== Get the AGL code -First, clone a manifest file for AGL Electric Eel: +First, use the manifest file for AGL's Itchy Icefish release to download the required repositories: ---- mkdir myproject cd myproject -repo init -b eel -m default.xml -u https://gerrit.automotivelinux.org/gerrit/AGL/AGL-repo.git +repo init -b icefish -m icefish_9.0.1.xml -u https://gerrit.automotivelinux.org/gerrit/AGL/AGL-repo repo sync ---- -This will download the basic Yocto layers you need. Generally, HERE Technologies recommends using AGL's latest point release. - .What is this actually doing? **** Yocto is a set of tools, templates and methods for building Linux systems from scratch. Automotive Grade Linux is a complete Linux distribution designed for in-car systems. 
It includes base system layers from Poky and OpenEmbedded, board support layers for popular automotive platforms, and quite a lot more. @@ -50,28 +51,43 @@ All of these layers are assembled into a built Linux system by Bitbake, the buil Now you can run the following script to get the environment set up: ---- -source meta-agl/scripts/aglsetup.sh -m agl-demo agl-appfw-smack agl-sota <1> +source meta-agl/scripts/aglsetup.sh -m agl-sota <1> ---- <1> Where `` is either `raspberrypi3` or `qemux86-64`. -IMPORTANT: Only `raspberrypi3` and `qemux86-64` will work out of the box. If you want to create an {product-name-short}-compatible build for one of the other architectures AGL supports, you'll need to write a BSP layer for that board. You can take the link:https://github.com/advancedtelematic/meta-updater-raspberrypi/tree/morty/recipes-bsp[Raspberry Pi BSP] as an example of what's needed. You can also link:mailto:otaconnect.support@here.com[contact us directly] to inquire about commercial development of BSP layers for specific boards. +IMPORTANT: Only `raspberrypi3` and `qemux86-64` will work out of the box. If you want to create an {product-name-short}-compatible build for one of the other architectures AGL supports, you may need to write a BSP layer for that board. You can take the link:https://github.com/advancedtelematic/meta-updater-raspberrypi/tree/morty/recipes-bsp[Raspberry Pi BSP] as an example of what's needed. You can also link:mailto:otaconnect.support@here.com[contact us directly] to inquire about commercial development of BSP layers for specific boards. -include::build-raspberry.adoc[tags=config;bitbake] +include::ota-client::page$build-raspberry.adoc[tags=config] -== Put the built image on your device's boot media +== Bitbake -The build process creates disk images as an artefact. 
The exact image you'll need will vary depending on the architecture you're building forfootnote:[For example, building the `agl-demo-platform` target for Raspberry Pi 3 creates an image at `build/tmp/deploy/images/raspberrypi3/agl-demo-platform-raspberrypi3.wic`.], but it will be located in the `/tmp/deploy/images` directory under your build directory. We recommend using https://www.balena.io/etcher/[Etcher, window="_blank"] to write the image, or following the normal flashing procedure for your device if applicable. +Now you're ready to build your image. -TIP: You can also write the image using `dd`, but since the wrong kind of typo in a dd command is so dangerous, we don't recommend it. +[subs=+attributes] +---- +bitbake agl-image-minimal +---- -You'll probably also want to resize the main partition to fill all of the space on the boot media: +This step will take a while. Building everything from scratch, it will likely take several hours. + +If the build fails due a problem with the tar recipe, try this command: ---- -sudo parted -s /dev/sdX resizepart 2 '100%' <1> -sudo resize2fs /dev/sdX2 <1> +bitbake -c clean tar-native ---- -<1> Where /dev/sdX is the device you wrote the image to. -You should now be able to boot your device and have it show up in your {product-name-short} account. +You can then retry bitbaking your image. + +== Running the built image + +=== Put the built image on an SD card for Raspberry Pi 3 + +You can now flash the image onto an SD card using the same method as described for a xref:build-raspberry.adoc#_put_the_built_image_on_an_sd_card[regular Raspberry Pi build]. However, the exact image you'll need will vary depending on the architecture you're building forfootnote:[For example, building the `agl-image-minimal` target for Raspberry Pi 3 creates an image at `build/tmp/deploy/images/raspberrypi3/agl-image-minimal-raspberrypi3.wic`.], but it will be located in the `/tmp/deploy/images` directory under your build directory. 
You can also use https://www.balena.io/etcher/[Etcher, window="_blank"] to write the image, or follow the normal flashing procedure for your device if applicable. + +TIP: You can also write the image using `dd`, but since the wrong kind of typo in a dd command is so dangerous, we don't recommend it. + +=== Run with QEMU +You can now run the image in QEMU using the same method as described for a xref:build-qemu.adoc#_run_the_built_image_with_qemu[regular QEMU build]. However, the exact image you'll need will vary depending on the architecture you're building forfootnote:[For example, building the `agl-image-minimal` target for QEMU creates an image at `build/tmp/deploy/images/qemux86-64/agl-image-minimal-qemux86-64.ota-ext4`.], but it will be located in the `/tmp/deploy/images` directory under your build directory. +include::ota-client::partial$recommended-steps.adoc[tags=firstbuild-nextstep] diff --git a/docs/ota-client-guide/modules/ROOT/pages/build-configuration.adoc b/docs/ota-client-guide/modules/ROOT/pages/build-configuration.adoc index 44595bbdbf..302b4b43e9 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/build-configuration.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/build-configuration.adoc @@ -7,20 +7,26 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +== Configuration variables in meta-updater + +Here, we provide reference documentation on how to configure link:https://github.com/advancedtelematic/meta-updater[meta-updater]. Below are the variables that you can set in your `local.conf` to control the finer points of your build process. + .OTA-related options for building disk images [cols="1,2a",options="header"] |==================== | Option | Description -| `OSTREE_BRANCHNAME`|OSTree branch name. Defaults to `${SOTA_HARDWARE_ID}`. Particularly useful for grouping similar images. Should only contain characters from the character set `[a-zA-Z0-9_-]`. 
+| `OSTREE_BRANCHNAME`|OSTree branch name. Defaults to `$\{SOTA_HARDWARE_ID}`. Particularly useful for grouping similar images. Should only contain characters from the character set `[a-zA-Z0-9_-]`. | `OSTREE_REPO`|Path to your OSTree repository. Defaults to `$\{DEPLOY_DIR_IMAGE}/ostree_repo` -| `OSTREE_OSNAME`|OS deployment name on your target device. For more information about deployments and osnames see the https://ostree.readthedocs.io/en/latest/manual/deployment/[OSTree documentation]. Defaults to "poky". +| `OSTREE_OSNAME`|OS deployment name on your target device. For more information about deployments and osnames see the https://ostreedev.github.io/ostree/deployment/[OSTree documentation]. Defaults to "poky". | `OSTREE_COMMIT_BODY`|Message attached to OSTree commit. Empty by default. -| `OSTREE_COMMIT_SUBJECT`|Commit subject used by OSTree. Defaults to `Commit-id: ${IMAGE_NAME}` +| `OSTREE_COMMIT_SUBJECT`|Commit subject used by OSTree. Defaults to `Commit-id: $\{IMAGE_NAME}` | `OSTREE_UPDATE_SUMMARY`|Set this to '1' to update summary of OSTree repository on each commit. '0' by default. -| `OSTREE_DEPLOY_DEVICETREE`|Set this to '1' to include devicetree(s) to boot +| `OSTREE_DEPLOY_DEVICETREE`|Set this to '1' to include devicetree(s) to boot. +| `OSTREE_DEVICETREE`| A list of device tree blob(s) to include in the OSTree image. Defaults to `$\{KERNEL_DEVICETREE}`. | `GARAGE_SIGN_AUTOVERSION`|Set this to '1' to automatically fetch the last version of the garage tools installed by the aktualizr-native. Otherwise use the fixed version specified in the recipe. +| `GARAGE_TARGET_NAME`|Target name used in the Uptane metadata. Defaults to `$\{OSTREE_BRANCHNAME}`. | `GARAGE_TARGET_URL` | Sets the `--url` parameter of `garage-sign targets add`, which sets a custom URL for the Image repository targets. -| `GARAGE_TARGET_EXPIRES` | +| `GARAGE_TARGET_EXPIRES` | // MC: This block shows reusable content snippets when the content is rendered in the docs portal. 
// We reuse the same descriptions in multiple places with includes - if rendered in Github (where includes aren't allowed), we fall back to the old descriptions. ifdef::env-github[Sets the `--expires` parameter of `garage-sign targets sign`. Format is a UTC instant such as '2018-01-01T00:01:00Z'.] @@ -29,11 +35,8 @@ include::partial$config-descriptions.adoc[tags=metadata-expires] ---- GARAGE_TARGET_EXPIRES = "2018-01-01T00:01:00Z" ---- -[NOTE] -==== -Currently, this only works when using the master branch of meta-updater. -==== endif::[] +More detail is available in the xref:metadata-expiry.adoc[metadata expiry] page. | `GARAGE_TARGET_EXPIRE_AFTER` | // MC: This block shows reusable content snippets when the content is rendered in the docs portal. // We reuse the same descriptions in multiple places with includes - if rendered in Github (where includes aren't allowed), we fall back to the old descriptions. @@ -43,17 +46,14 @@ include::partial$config-descriptions.adoc[tags=metadata-expireafter] ---- GARAGE_TARGET_EXPIRE_AFTER = "1Y3M5D" ---- -[NOTE] -==== -Currently, this only works when using the master branch of meta-updater. -==== endif::[] +More detail is available in the xref:metadata-expiry.adoc[metadata expiry] page. | `INITRAMFS_IMAGE`|The initramfs/initrd image that is used as a proxy while booting into OSTree deployment. Do not change this setting unless you are sure that your initramfs can serve as such a proxy. -| `SOTA_PACKED_CREDENTIALS`|When set, your ostree commit will be pushed to a remote repo as a bitbake step. This should be the path to a zipped credentials file in xref:provisioning-methods-and-credentialszip.adoc[the format accepted by garage-push]. +| `SOTA_PACKED_CREDENTIALS`|When set, your OSTree commit will be pushed to a remote repo as a bitbake step. This should be the path to a zipped credentials file in xref:provisioning-methods-and-credentialszip.adoc[the format accepted by garage-push]. 
| `SOTA_DEPLOY_CREDENTIALS`|When set to '1' (default value), deploys credentials to the built image. Override it in `local.conf` to build a generic image that can be provisioned manually after the build. -| `SOTA_CLIENT_PROV`|Which provisioning method to use. Valid options are `aktualizr-shared-prov`, `aktualizr-device-prov`, and `aktualizr-device-prov-hsm`. For more information on these provisioning methods, see the xref:client-provisioning-methods.adoc[OTA Connect documentation]. The default is `aktualizr-shared-prov`. This can also be set to an empty string to avoid using a provisioning recipe. -| `SOTA_CLIENT_FEATURES`|Extensions to aktualizr. The only valid options are `hsm` (to build with HSM support) and `secondary-network` (to set up a simulated 'in-vehicle' network with support for a primary node with a DHCP server and a secondary node with a DHCP client). -| `SOTA_SECONDARY_CONFIG`|A file containing JSON configuration for secondaries. It will be installed into `/etc/sota/ecus` on the device and automatically provided to aktualizr. See xref:posix-secondaries-bitbaking.adoc[here] for more details. +| `SOTA_CLIENT_PROV`|Which provisioning method to use. Valid options are `aktualizr-shared-prov`, `aktualizr-device-prov`, and `aktualizr-device-prov-hsm`. See the xref:client-provisioning-methods.adoc[client provisioning methods] page for more information on these provisioning methods. The default is `aktualizr-shared-prov`. This can also be set to an empty string to avoid using a provisioning recipe. +| `SOTA_CLIENT_FEATURES`|Extensions to aktualizr. The only valid options are `hsm` (to build with HSM support), `ubootenv` (to enable rollback support in U-Boot; currently only supported for Raspberry Pi), and `serialcan` (to enable serial CAN support). +| `SOTA_SECONDARY_CONFIG`|A file containing JSON configuration for Secondaries. It will be installed into `/etc/sota/ecus` on the device and automatically provided to aktualizr. 
See the guide on xref:posix-secondaries-bitbaking.adoc[bitbaking POSIX Secondaries] for more details. | `SOTA_HARDWARE_ID`|A custom hardware ID that will be written to the aktualizr config. Defaults to MACHINE if not set. | `SOTA_MAIN_DTB`|The base device tree to use with the kernel. Used together with FIT images. You can change it, and the device tree will also be changed after the update. | `SOTA_DT_OVERLAYS`|A whitespace-separated list of used device tree overlays for FIT image. This list is OSTree-updateable as well. @@ -61,3 +61,30 @@ endif::[] | `RESOURCE_xxx_pn-aktualizr`|Controls maximum resource usage of the aktualizr service, when `aktualizr-resource-control` is installed on the image. See xref:meta-updater-usage.adoc#_aktualizr_service_resource_control[aktualizr service resource control] for details. | `SOTA_POLLING_SEC`|Sets polling interval for aktualizr to check for updates if aktualizr-polling-interval is included in the image. |==================== + +== Custom aktualizr versions + +You can override the version of aktualizr included in your image. This requires that the version you wish to run is pushed to the https://github.com/advancedtelematic/aktualizr[aktualizr github repo]. You can then use these settings in your `local.conf` to simplify the development process: + +[options="header"] +|====================== +| Option | Effect +| `require classes/sota_bleeding.inc` | Build the latest head (by default, using the master branch) of Aktualizr +| `BRANCH_pn-aktualizr = "mybranch"` + +`BRANCH_pn-aktualizr-native = "mybranch"` | Build `mybranch` of Aktualizr. Note that both of these need to be set. This is normally used in conjunction with `require classes/sota_bleeding.inc` +| `SRCREV_pn-aktualizr = "1004efa3f86cef90c012b34620992b5762b741e3"` + +`SRCREV_pn-aktualizr-native = "1004efa3f86cef90c012b34620992b5762b741e3"` | Build the specified revision of Aktualizr. Note that both of these need to be set. 
This can be used in conjunction with `BRANCH_pn-aktualizr` and `BRANCH_pn-aktualizr-native` but will conflict with `require classes/sota_bleeding.inc` +| `TOOLCHAIN_HOST_TASK_append = " nativesdk-cmake "` | Use with `bitbake -c populate_sdk core-image-minimal` to build an SDK. See the https://github.com/advancedtelematic/aktualizr#developing-against-an-openembedded-system[aktualizr repo] for more information. +|====================== + +== Overriding target version +*Warning: overriding target version is a dangerous operation, make sure you understand this section completely before doing it.* + +Every time you build an image with `SOTA_PACKED_CREDENTIALS` set, a new entry in your Uptane metadata is created and you can see it in the OTA Garage UI if you're using one. Normally this version will be equal to the OSTree hash of your root file system. If you want it to be different though, you can override it using one of two methods: + +1. Set the `GARAGE_TARGET_VERSION` variable in your `local.conf`. +2. Write a recipe or a bbclass to write the desired version to `$\{STAGING_DATADIR_NATIVE}/target_version`. An example of such a bbclass can be found in `classes/target_version_example.bbclass`. + +Please note that [target name, target version] pairs are expected to be unique in the system. If you build a new target with the same target version as a previously built one, the old package will be overwritten on the update server. It can have unpredictable effects on devices that have this version installed, and it is not guaranteed that information will be reported correctly for such devices or that you will be able to update them (we're doing our best though). The easiest way to avoid problems is to make sure that your overriding version is as unique as an OSTree commit hash. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/build-qemu.adoc b/docs/ota-client-guide/modules/ROOT/pages/build-qemu.adoc index 2e6342e21c..622c2c5176 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/build-qemu.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/build-qemu.adoc @@ -1,4 +1,5 @@ -= Build a QEMU/VirtualBox += Build a QEMU image +:page-partial: ifdef::env-github[] [NOTE] @@ -7,7 +8,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: +include::ota-client::partial$aktualizr-version.adoc[] + :page-layout: page :page-categories: [quickstarts] :page-date: 2017-05-16 15:49:22 @@ -18,13 +20,13 @@ endif::[] :machine: qemux86-64 :meta-env: qemux86-64 -{product-name} lets you easily manage OTA updates to embedded devices running custom-built Yocto images. This is a guide for building a simple Yocto image that you can run in Virtualbox or QEMU. This is a good way to get started if you don't know yet what hardware your project will use, or if you just want to try out the features of {product-name} without worrying about physical devices. +{product-name} lets you easily manage OTA updates to embedded devices running custom-built Yocto images. This is a guide for building a simple Yocto image that you can run in QEMU. This is a good way to get started if you don't know yet what hardware your project will use, or if you just want to try out the features of {product-name} without worrying about physical devices. -include::build-raspberry.adoc[tags=prereqs;env-setup;config;bitbake] +include::ota-client::page$build-raspberry.adoc[tags=prereqs;env-setup;config;bitbake] == Run the built image with QEMU -The build process creates disk images as an artefact. You can then directly run them with QEMU. (If you don't already have it installed, install it with `apt-get install qemu` or similar.) 
The meta-updater layer contains a helper script to launch the images: +The build process creates disk images as an artefact. You can then directly run them with QEMU. (If you don't already have it installed, install it with `apt install qemu` or similar.) The meta-updater layer contains a helper script to launch the images: ---- ../meta-updater/scripts/run-qemu-ota [image name] [mac address] @@ -32,6 +34,8 @@ The build process creates disk images as an artefact. You can then directly run Both arguments are optional; image name defaults to `core-image-minimal`, and if a mac address isn't specified, a random one is generated. +TIP: Depending on your build, the `meta-updater` directory might be in a slightly different location. For example, if you're building an xref:build-agl.adoc[AGL image], the meta-updater layer would be at `../external/meta-updater`. + .Persistent storage **** By default, QEMU will run your image in snapshot mode, *discarding any changes you made* to the disk image as soon as it exits. If you want to have a persistent VM, you need to create an link:https://wiki.archlinux.org/index.php/QEMU#Overlay_storage_images[overlay storage image] in qcow2 format. The helper script can also manage this for you, making it easy to create an emulated fleet of devices: @@ -44,3 +48,5 @@ If the specified overlay image doesn't yet exist, it will be created first, or l **** You should see your new device appear in {product-name-short} shortly after it boots. It will generate a random name for itself during autoprovisioning; you can change the name later. 
+ +include::partial$recommended-steps.adoc[tags=firstbuild-nextstep] diff --git a/docs/ota-client-guide/modules/ROOT/pages/build-raspberry.adoc b/docs/ota-client-guide/modules/ROOT/pages/build-raspberry.adoc index ce5feb6793..e9c41998d3 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/build-raspberry.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/build-raspberry.adoc @@ -1,4 +1,5 @@ = Build a Raspberry Pi image +:page-partial: ifdef::env-github[] [NOTE] @@ -7,7 +8,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: +include::partial$aktualizr-version.adoc[] + :page-layout: page :page-categories: [quickstarts] :page-date: 2017-05-16 15:48:37 @@ -29,31 +31,29 @@ endif::[] You'll need a build machine with the following: -* A x86-64 Linux distro link:https://www.yoctoproject.org/docs/2.6/ref-manual/ref-manual.html#detailed-supported-distros[supported by the Yocto project] with the link:https://www.yoctoproject.org/docs/2.6/ref-manual/ref-manual.html#required-packages-for-the-build-host[required packages] installed. +* A x86-64 Linux distro link:https://www.yoctoproject.org/docs/{yocto-version}/ref-manual/ref-manual.html#detailed-supported-distros[supported by the Yocto project] with the link:https://www.yoctoproject.org/docs/{yocto-version}/ref-manual/ref-manual.html#required-packages-for-the-build-host[required packages] installed. 
** On a Debian-based system, you should be able to install all the required packages with the following command: + ---- -sudo apt-get install gawk wget git-core diffstat unzip texinfo gcc-multilib build-essential chrpath socat cpio python python3 python3-pip python3-pexpect python-dev xz-utils debianutils iputils-ping cpu-checker default-jre parted +sudo apt install gawk wget git diffstat unzip texinfo gcc-multilib build-essential chrpath socat cpio python python3 python3-pip python3-pexpect python-dev xz-utils debianutils iputils-ping cpu-checker default-jre parted ---- ** Many/most distros that aren't on the officially supported list will still work just fine--feel free to give it a try with whatever you're running. -** Although the Yocto project as a whole does support architectures other than x86-64 for the build machine, one of the layers we'll be using only supports x86-64. -** You *can* run this all inside a VM, but a Yocto build is a pretty resource-intensive process, so generally we don't recommend it. Make sure your VM meets the following requirements: -*** At least 6GB of RAM -*** At least 150GB of disk space. -* 100GB of free disk space +* 100GB+ of free disk space +* 6GB+ of RAM ifeval::["{machine}" == "qemux86-64"] -* QEMU--we recommend installing it from your distro's package manager, e.g. `sudo apt-get install qemu` +* QEMU--we recommend installing it from your distro's package manager, e.g. `sudo apt install qemu` endif::[] * link:https://android.googlesource.com/tools/repo/[repo] ** link:https://source.android.com/source/downloading#installing-repo[Download the latest version] directly from Google, or -** install it from your distro's packages if available (`sudo apt-get install repo`) +** install it from your distro's packages if available (`sudo apt install repo`) + +TIP: It's possible to use a virtual machine running Linux as your build machine. 
It will be slower, and you're more likely to run into difficult-to-troubleshoot issues. If you do want to use a VM despite this warning, though, make sure the VM has enough resources allocated to it. Along with the disk space and memory requirements above, we suggest allocating at least 4-6 CPU cores to the VM to speed up building. Also, make sure that you've generated your xref:generating-provisioning-credentials.adoc[provisioning credentials] first. // end::prereqs[] // tag::env-setup[] - == Create your Yocto build environment First, clone a manifest file for the quickstart project: @@ -61,7 +61,7 @@ First, clone a manifest file for the quickstart project: ---- mkdir myproject cd myproject -repo init -u https://github.com/advancedtelematic/updater-repo.git -m thud.xml +repo init -u https://github.com/advancedtelematic/updater-repo.git -m {yocto-branch}.xml repo sync ---- @@ -153,3 +153,5 @@ sudo ../meta-updater-raspberrypi/scripts/flash-image.sh <1> TIP: You can also write the image using `dd`, but since the wrong kind of typo in a dd command is so dangerous, we don't recommend it. If you really want to do it that way, though, inspect the shell script to find the required commands. Now, put the card into your Pi, plug it into a *wired* internet connection, and power it on. You should see it come online in a minute or two. It will generate a random name for itself during autoprovisioning; you can change the name later. 
+ +include::partial$recommended-steps.adoc[tags=firstbuild-nextstep] diff --git a/docs/ota-client-guide/modules/ROOT/pages/change-signature-thresholds.adoc b/docs/ota-client-guide/modules/ROOT/pages/change-signature-thresholds.adoc new file mode 100644 index 0000000000..749b1d4903 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/change-signature-thresholds.adoc @@ -0,0 +1,58 @@ += Change signature thresholds +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +One way you can keep your software repository more secure is by adding extra signing keys, and then setting up *key thresholds* to require metadata to be signed by multiple different keys. By default, each role on the repository has one signing key and a threshold of one. For more security in the Root role, for example, you might want to create 5 different keys, keep them locked up at different locations around the world, and require that at least three of the keys sign off on any change to Root metadata. You can also use key thresholds for delegations. + +*To change the thresholds for signing Root metadata:* + +. Generate a new Root key. ++ +[source,bash] +---- +garage-sign key generate --repo <localreponame> --name root-key-1 --type rsa +---- + +. Add the new Root key to the `root.json` file. ++ +[source,bash] +---- +garage-sign root key add --repo <localreponame> --key-name <newkeyname> +---- + +. If you need more Root keys, repeat steps 1 and 2. +. In the *Roles* folder of your local software repository, open the *Unsigned* folder, and then open the `root.json` file. +. Depending on the threshold that you want to change, in the `signed` block, in the `root` subsection, for the `keyids` object, specify the list of valid keys that you want to use for signing. ++ +You can find the list of all your Root keys in the `keys` subsection. 
++ +NOTE: The new version of Root metadata should be valid according to the rules of the previous and current versions. So you must sign Root metadata with the threshold of keys specified in the previous Root metadata file and the threshold of keys specified in the new Root metadata file. For more information, see the https://uptane.github.io/papers/uptane-standard.1.0.1.html#rfc.section.5.4.4.3[related section] of the Uptane standard. + +. For the `version` object, specify the version number of the new Root metadata. +. Sign the `root.json` file with the same number of valid signatures that you specified in the Root key threshold. ++ +[source,bash] +---- +garage-sign root sign \ + --repo <localreponame> \ + -k <rootkeyname1> \ + -k <rootkeyname2> \ + --expires <expirationdate> +---- + +. Push the modified `root.json` file to OTA Connect. ++ +[source,bash] +---- +garage-sign root push \ + --repo <localreponame> +---- + +To change the threshold of delegations, modify the .json file with the delegation metadata in the same way as the `root.json` file. + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc b/docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc index 79b65eab2e..e5d82998dd 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/client-provisioning-methods.adoc @@ -14,67 +14,78 @@ endif::[] :icons: font :toc: macro -Before devices can receive updates, each device needs to have a unique identity and a certificate. The provisioning process ensures that a signed certificate is associated with each device. This process is crucial for securing communication between devices and the OTA Connect server. -OTA Connect supports two provisioning methods. These methods determine the components that will play the role of the “issuer” and the “verifier” in your infrastructure. 
The “issuer” is the server that signs and issues your device certificates. The “verifier” is the server that verifies the authenticity of device certificates. -We refer these two methods as “provisioning with shared credentials” and “provisioning with device credentials”. +== What is device provisioning? -* *Provisioning with shared credentials* -+ -This type of provisioning is great for testing because the OTA Connect server plays the role of both the “issuer” and “verifier” when creating device certificates. You can try out OTA Connect without involving the rest of your infrastructure. However, it’s not secure enough for a production scenario. -+ -* *Provisioning with device credentials* -+ -This is the provisioning method that you should use in production. In this scenario, the OTA Connect server doesn’t issue any credentials to devices; it simply inspects device certificates that come preinstalled. The OTA Connect server only plays the role of “verifier” and the “issuer” role is handled by your fleet certificate authority. +OTA Connect uses mutual TLS for transport security and device authentication. This means that: + +* every device needs to have its own X.509 certificate, +* every device needs to have a way to trust the X.509 certificate of the OTA Connect server's device gateway, and +* the OTA Connect server needs to have a way to decide whether it trusts each vehicle's X.509 certificate. + +The vehicle knows it can trust the gateway because each OTA Connect account gets a unique device gateway URL, with its own unique X.509 certificate. We then include that certificate in the `credentials.zip` you download, and it gets baked into the image as a pinned certificate authority (CA) when you bitbake. + +The device gateway decides whether to allow a device to connect based on the device's X.509 certificate. Every OTA Connect account has one or more *Fleet Root CAs* which are responsible for signing device certificates. 
When a device connects, your device gateway checks whether its certificate is signed by a trusted Fleet Root CA, and if not, rejects the connection. + +*Device provisioning*, therefore, is simply the process of providing a device with: + +* an X.509 certificate +** that is unique to the vehicle +*** and is signed by a Fleet Root CA that your OTA Connect account trusts -.How the server roles change depending on which method you choose: -[caption="Figure 1: "] -image::prov-diff-infra.png[] - +== How to provision devices -Note that in both cases, the server that plays “issuer” role, must have access to the private key and the root certificate for your fleet. The root certificate and private key are used to sign identity metadata and to verify the identity of connected devices. +OTA Connect supports two provisioning methods: "*provisioning with shared credentials*" and "*provisioning with device credentials*". No matter which method you choose, you'll end up in the same place: each of your devices will have its own signed certificate, and will use that certificate to identify itself and establish a secure communication channel with the server (via mutual TLS). + +The difference between the two methods is in how exactly that certificate gets to the vehicle, and who controls the Fleet Root CA. Let's take a closer look at how each method works. -Let’s take a close look at how each method works. == Provisioning with shared credentials -This method is called “provisioning with shared credentials” because you install a temporary provisioning key that is shared by all devices. -With this method, perform the following major steps: +This type of provisioning is the default, and is great to start with, because the OTA Connect server does everything for you: it generates a Fleet Root CA for your account, and it generates a new vehicle certificate every time a new device comes online. This allows you to try out OTA Connect without involving the rest of your infrastructure. 
However, this method may not be secure enough for a production scenario. + +In shared credential provisioning, the OTA Connect server creates the Fleet Root CA for you, and stores the private key for the CA in a https://www.vaultproject.io/[Vault] instance. The server also generates a credential for you that can be shared amongst all the devices in your fleet. That credential--also known as a provisioning key--is included in your `credentials.zip`, and is baked into the generic image that you flash on all your devices. + +In this method, the following steps occur: + +. You download your `credentials.zip` with the provisioning key inside. +. You bitbake an image with the default provisioning method configured, providing bitbake with your `credentials.zip`. +. You flash that image on as many devices as you wish. +. The first time each device comes online, it connects to a special endpoint on the device gateway and says, “Hi, I’m a new device, and I don’t have a TLS certificate yet. Here’s my provisioning key.” +. Our crypto service generates a new keypair and X.509 certificate for the device, signs that certificate with the private Fleet Root CA in the Vault instance, and sends the whole bundle to the vehicle. +. The crypto service deletes the vehicle’s private key and the device stores its new keypair and certificate. -* You download a temporary provisioning key from the OTA Connect server and install it on a base software image. -* You then flash your base software image to your devices. -* Once each device boots up, it uses the shared provisioning key to request a permanent device certificate from the OTA Connect server. -* The OTA Connect server verifies the provisioning key, issues the device with an X.509 certificate which is then downloaded to the device. -* The device then uses this certificate for all future transactions. +This entire transaction is secured with TLS, using the pinned device gateway certificate provisioned in the image. 
-This method is fine for provisioning devices quickly but if a malicious actor steals your provisioning key, there’s no way to prevent unauthorized devices from provisioning. You’d have to blacklist the provisioning key for all devices and issue a new one. +.Summary of Shared credential provisioning +image::img::shared-cred-provisioning.png[width=100%] == Provisioning with device credentials -If you’re using OTA Connect in production, you should provision with device credentials. -In this scenario, the OTA Connect server doesn’t issue any credentials to devices. You need to preinstall the device certificates yourself. -How you get the certificates on to your devices is your decision. Your chosen method can depend on many variables such as the storage method on the device, whether you have connectivity at your factory, and whether you choose an internal or third party root of trust for your certificates. -For example, you might have a deployment process where your build system requests a certificate from your CA while a software image is being built. The device certificate is then installed on the image just before it is flashed to the target device. -Once a device boots up, it connects to the OTA Connect server and provides the pre-installed device certificate to verify the device identity. The OTA Connect server then verifies that the certificate came from your CA. -With this method, it is extremely difficult for an unauthorized device to join your fleet. +We usually recommend this provisioning method if you have high cybersecurity compliance needs, and especially if you have devices with a hardware security module. In this method there is no shared credential. Instead, the following steps occur: +. You provide us with your own Fleet Root CA’s certificate (but NOT the private key). +. Then, you make sure that each device acquires, in some out-of-band process, an X.509 certificate signed by your Fleet Root CA. 
++
+For maximum security, you would generate a keypair and a self-signed X.509 certificate inside an HSM on the device (so that there’s never any private key material outside of the HSM), then submit a PKCS#10 certificate signing request (CSR) to an appropriate service inside your own PKI, using an authentication method appropriate to your security needs. In an automotive OEM context, for example, that might be a private server inside the factory infrastructure, using the physical location and an airgapped network as ways to authenticate the CSR’s validity.
+. Once the device has its signed certificate, it can already establish a mutual TLS connection with the OTA Connect server. Any time a device that's not already in the system connects with a valid certificate, we add it to your fleet, using the CN on the vehicle certificate as the device ID.
 
-The following diagram summarizes the differences in how devices are provisioned between the two methods.
+Note that nowhere in this process have we had to use a shared credential, nor has any private key material existed outside of the vehicle (for vehicle keys), or your own PKI (for the Fleet Root CA). We have also used mutual TLS for transport security right from the beginning. This is why we describe this process as the more secure option.
 
-.How the server roles change depending on which method you choose:
-[caption="Figure 2: "]
-image::prov-diff-devices.png[]
+TIP: For a more practical overview, read our xref:enable-device-cred-provisioning.adoc[step-by-step guide to setting up device credential provisioning].
 
-With “shared credential” provisioning, devices identified by the provisioning key and the device ID. The role of “issuer” is played by the OTA Connect server, which expects a provisioning key.
-With “device credential” provisioning, you decide how devices are identified.
You could generate and preinstall certificate signing request (CSR) files which provide your Certificate Authority (CA) with all the necessary identification details. +.Summary of Device credential provisioning +image::img::device-cred-provisioning.png[width=100%] == Setting up the OTA Connect Server for Provisioning -If you want to use “shared credential” provisioning, we’ll generate a fleet root certificate and private key for you and store them on the OTA Connect server. We take the security of these keys and certificates extremely seriously: following industry best practices, they are kept in a Vault instance and only taken out when you request them. -If you want to use “device credential” provisioning, you’ll need to provide us with your own fleet root certificate so that the OTA Connect server can verify devices. -Of course, you can use both methods, but in that case, we recommend that you maintain separate user accounts: +If you want to use "shared credential" provisioning, you don't have to do anything at all. When your account was created, we already generated a Fleet Root CA and keypair for you, and stored them on the OTA Connect server. We take the security of these keys extremely seriously: following industry best practices, they are kept in a Vault instance and only taken out when you request them. + +If you want to use "device credential" provisioning, provide us with your Fleet Root CA so that the OTA Connect server can verify devices. +Of course, you can use both methods, but in that case, we recommend that you maintain separate xref:ota-web::environments-intro.adoc[environments]: + +* one for testing with "shared credential" provisioning +* one for production with "device credential" provisioning -* one account for testing with “shared credential” provisioning -* one account for production with “device credential” provisioning +It is not possible to migrate from a test environment to a production environment. 
Therefore, we recommend that you test with devices that do not go into production or devices that can be completely wiped and reset once they are ready to deploy. +Once you are ready for production, you should use your production environment, your own Fleet Root certificate, and production devices that have their device certificates preinstalled. -Migrating devices from a test account to a production account is an extremely complex process and should be avoided. Instead, we recommend that you test with devices that will not go into production or devices that can be completely reset for production. -Once you are ready for production, you should use your production account, your own fleet root certificate, and production devices that have their device certificates preinstalled. diff --git a/docs/ota-client-guide/modules/ROOT/pages/comparing-full-filesystem-update-strategies.adoc b/docs/ota-client-guide/modules/ROOT/pages/comparing-full-filesystem-update-strategies.adoc new file mode 100644 index 0000000000..ee2d09529f --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/comparing-full-filesystem-update-strategies.adoc @@ -0,0 +1,37 @@ += Comparing full-filesystem update strategies + +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +OSTree provides a number of very significant technological advantages over other full-filesystem updating schemes. For embedded systems that need a solution for safe, atomic, full-filesystem updates, the usual approach is to have some kind of *dual-bank* scheme. Here, we're going to take a look at the difference between OSTree and dual-bank systems, and the advantages OSTree can provide. 
+ +== Dual-bank + +In a dual-bank system, the read-only root filesystem is kept on a different partition from the writable user space, so that when an update is needed the whole partition can be overwritten. For atomicity and safety, this read-only partition is duplicated: there are two complete copies of the filesystem, kept on different partitions, and the active partition can be selected at boot time. + +When the system needs to be updated, the new filesystem image is written to the inactive partition, and the next time the system reboots, that partition becomes the active one. + +.Dual-bank update process (click to enlarge) +[caption="Figure 1: ",link={attachmentsdir}/dual-bank-system-update-flow.svg] +image::img::dual-bank-system-update-flow.svg[] + +The main advantage of this update model is its safety. Updates are always strictly atomic, and there is always a known good image that can be rolled back to. However, there are significant trade-offs in flexibility and materials costs that must be made: the size of the root partition must be chosen when the system is flashed for the very first time, and the duplication of the root partition doubles the space required. When choosing how big to make the root partition, a device manufacturer has to consider not just how big their filesystem image currently is, but also must estimate and plan for the size of all future updates. If the size chosen is too small, it may restrict the ability to add new features. Making it larger, of course, adds to the bill of goods for the product--and since it's duplicated, every extra megabyte of future capacity actually costs two megabytes to accommodate. + +== OSTree + +OSTree checksums individual files and stores them as content-addressed objects, much like git. The read-only filesystem is built by "checking out" a particular revision, and hardlinking the content-addressed objects into the actual Linux directory structure. 
Multiple filesystem versions can be stored, and any content that is duplicated across versions is only stored once. A complete history of all versions is stored in TreeHub, but it is not required to store that complete revision history on the device. Only one partition is needed--writable user space can be on the same partition as the OSTree content store. + +When the system needs to be updated, {product-name} sends a small metadata file with a particular commit identifier. The client pulls that commit from TreeHub, only downloading the new files, and only downloading binary diffs of changed files. Once the pull is complete and verified, the system is instructed to boot into the new version the next time it starts up. + +.OSTree update process (click to enlarge) +[caption="Figure 2: ",link={attachmentsdir}/ostree-update-flow.svg] +image::img::ostree-update-flow.svg[] + +With OSTree, you no longer need to guess how much room you might need in the future to expand your system; the OSTree content store expands and contracts as needed. You also save a significant amount of space, since only diffs between versions need to be stored. OSTree also allows you to garbage-collect old images: if you upgrade 1.0 -> 1.1 -> 1.2, for example, by default the {product-name-short} client will garbage-collect all local objects unique to 1.0. If you decided later on that you in fact did want to go back to v1.0, you still could: if you pushed v1.0 from {product-name-short}, the client would download only the diff from TreeHub, repopulate the local object store, and then reboot into that version. Of course, it's also possible to configure OSTree to keep more than two revisions on the local disk; this can be particularly useful in QA workflows, allowing for rapid testing of a feature or an external integration against multiple different firmware versions. + +Best yet, you get all of these benefits *without having to give up the safety of a dual-bank setup*. 
Updates are still strictly atomic; if power is lost during the download of an update, the client will still boot into the old system when it starts up next, and will simply resume the download it had begun. You still always have a known good image on the system to roll back to; in fact, as stated above, you can keep an arbitrarily large number of revisions--an impossibility in a dual-bank system. diff --git a/docs/ota-client-guide/modules/ROOT/pages/cross-deploy-images.adoc b/docs/ota-client-guide/modules/ROOT/pages/cross-deploy-images.adoc index e62f221024..c6acda6fe5 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/cross-deploy-images.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/cross-deploy-images.adoc @@ -1,4 +1,4 @@ -= Transfer disk images to a software repository in another account += Transfer disk images to a software repository in another environment ifdef::env-github[] [NOTE] @@ -14,20 +14,20 @@ endif::[] :icons: font :sectnums: -For our recommended production workflow, you will need to move disk images from one account to another from time to time. For example, you might want to send a development build that you're happy with to the QA team, or send that build to the deployment team once it's passed QA. You can do this with our `garage-deploy` tool. +For our recommended production workflow, you will need to move disk images from one environment to another from time to time. For example, you might want to send a development build that you're happy with to the QA team, or send that build to the deployment team once it's passed QA. You can do this with our `garage-deploy` tool. Before you start, make sure that you've installed the xref:install-garage-sign-deploy.adoc[`garage-deploy`] tool first. -To transfer disk images to a different account, follow these steps: :: +To transfer disk images to a different environment, follow these steps: :: . xref:getstarted::generating-provisioning-credentials.adoc[Download provisioning keys] for both accounts. 
+ We'll assume that you named them `source-credentials.zip` and `dest-credentials.zip`. + -. Select an image and commit ID to deploy, and the hardware ID(s) to deploy it to +. Select an image and commit ID to deploy, and the hardware ID(s) to deploy them to. + -The image name is the one that appears in your {product-name} account--it will be the same as the `MACHINE` setting in Yocto by default, or the `OSTREE_BRANCHNAME` option if you set it. The commit ID is the hash of the OSTree commit, visible in the package details. The hardware IDs are for the destination account, and are equivalent to the `MACHINE` setting in your Yocto build. +The image name is the one that appears in your {product-name} account--it is the same as the `MACHINE` setting in Yocto by default, or the `OSTREE_BRANCHNAME` option if you set it. The commit ID is the hash of the OSTree commit, visible in the package details. The hardware IDs are for the destination environment, and are equivalent to the `MACHINE` setting in your Yocto build. + -. Run `garage-deploy` +. Run `garage-deploy`. + You can see the available options with `--help`: + @@ -48,11 +48,11 @@ garage-deploy command line options: same format as curl --cacert ---- + -For example, to deploy an image called `acme-modelB` with SHA `001ee11a28e3e08f3e93e31425f0721a7fb44946919284b629ca85a1cc3073cb` and make it installable on all Raspberry Pi devices on your target account, the command would be: +For example, to deploy an image called `acme-modelB` with SHA `001ee11a28e3e08f3e93e31425f0721a7fb44946919284b629ca85a1cc3073cb` and make it installable on all Raspberry Pi devices in your target environment, use the following command: + ---- garage-deploy --commit 001ee11a28e3e08f3e93e31425f0721a7fb44946919284b629ca85a1cc3073cb \ --name acme-modelB -f source-credentials.zip -p dest-credentials.zip -h raspberrypi3 ---- + -. Log into the destination account, and verify that your image has been deployed +. 
Go to your destination environment and verify that your image is deployed. diff --git a/docs/ota-client-guide/modules/ROOT/pages/customise-targets-metadata.adoc b/docs/ota-client-guide/modules/ROOT/pages/customise-targets-metadata.adoc new file mode 100644 index 0000000000..d5e266d8d6 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/customise-targets-metadata.adoc @@ -0,0 +1,114 @@ += Add custom metadata fields to Targets metadata +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +include::partial$aktualizr-version.adoc[] + +You may want to include extra metadata about your software inside the signed Uptane metadata that OTA Connect delivers to your device. Some reasons you might want to do this include: + +* To provide installation instructions or scripts for an image that cannot or should not be included in the image itself. +* To add extra tracking fields for internal compliance or auditing. + +To accommodate this use case, you can manually manage your Uptane metadata and add custom fields to your `targets.json` file. + +NOTE: For more information on additional use cases that require customization of the Targets metadata, see the https://uptane.github.io/papers/uptane-standard.1.0.1.html#rfc.section.5.2.3.1.1[related section] of the Uptane standard. + +== Prerequisites + +Before you write a custom metadata handler, do the following: + +* xref:libaktualizr-why-use.adoc[Build your own update client] using libaktualizr. ++ +To learn how to access custom metadata in the embedded client, see the https://advancedtelematic.github.io/aktualizr/index.html[Doxygen API reference]. + +* xref:rotating-signing-keys.adoc[Rotate your signing keys offline]. ++ +The following instructions assume that you have already done this, and know where to find your `targets.json`. 
+ +== Anatomy of the `targets.json` metadata + +Your `targets.json` file includes: + +* The `signatures` block that contains key IDs and signatures generated over the `signed` block. +* The `signed` block that contains the Uptane fields. +* In the `signed` block, the `targets` block that lists all of your software versions. ++ +include::garage-sign-reference.adoc[tags=target-term] ++ +Each target is identified by its name and version and contains three objects: + +** `hashes` – the SHA256 hash of the software version. +** `length` – the length of the target, in bytes. +** `custom` – other metadata that aktualizr uses. ++ +.The `custom` object +==== +[source,json] +---- +"custom": { + "name": "aegisub-font", + "version": "1", + "hardwareIds": [ + "kmk-docker-debian" + ], + "targetFormat": null, + "uri": null, + "createdAt": "2018-08-20T09:28:27Z", + "updatedAt": "2018-08-20T09:28:27Z" +} +---- +==== +You can add your custom metadata to the `custom` object. + +== Add custom metadata + +You can always modify your `targets.json` file to include more metadata. + +*To add custom metadata:* + +. In the `targets.json` file, navigate to the `custom` object, and specify any metadata fields that you want to add. ++ +Do not modify any of the existing values. We recommend to add a new field namespaced to your organization or some other unique identifier, and then put any custom sub-keys under that field, as in the example below. ++ +.Custom metadata +===== +[source,json] +---- +"custom": { + "name": "aegisub-font", + "version": "1", + "hardwareIds": [ + "kmk-docker-debian" + ], + "targetFormat": null, + "uri": null, + "createdAt": "2018-08-20T09:28:27Z", + "updatedAt": "2018-08-20T09:28:27Z", + "acme_inc_metadata": { + "application_install_handler": "com.dockerconfig.packager", + "build_correlation_id": "2ce4ebaf-b3ca-411b-977f-cd6b98065d88" + } +} +---- +===== + +. Sign the modified metadata. ++ +---- +garage-sign targets sign --repo myimagerepo --key-name mytargets +---- + +. 
Upload your customized `targets.json` to OTA Connect. ++ +---- +garage-sign targets push --repo myimagerepo +---- + +NOTE: If you want to add custom metadata while bitbaking, modify the `IMAGE_CMD_garagesign` function in link:https://github.com/advancedtelematic/meta-updater/blob/master/classes/image_types_ostree.bbclass#L217[image_types_ostree.bbclass]. For more information, see the http://www.yoctoproject.org/docs/{yocto-version}/dev-manual/dev-manual.html[Yocto Reference Manual]. + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/deb-package-install.adoc b/docs/ota-client-guide/modules/ROOT/pages/deb-package-install.adoc index 0fbcee858e..cd38793cff 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/deb-package-install.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/deb-package-install.adoc @@ -1,4 +1,4 @@ -== Installing aktualizr via debian package += Installing aktualizr via debian package ifdef::env-github[] [NOTE] @@ -14,7 +14,7 @@ Aktualizr makes .deb packages available via the https://github.com/advancedtelem sudo apt install ./aktualizr.deb ---- -=== Setting up aktualizr +== Setting up aktualizr The debian package will install, enable, and start an `aktualizr` systemd service immediately after it's installed. However, there are some configuration steps that should be taken before the service starts. To use aktualizr with a server (i.e. https://github.com/advancedtelematic/ota-community-edition/[OTA Community Edition] or https://docs.ota.here.com[HERE OTA Connect]), you will need to download the provisioning credentials file provided by the server and place it at `/var/sota/sota_provisioning_credentials.zip`. @@ -22,11 +22,11 @@ You can pass any other command-line arguments in this file, as well. For security reasons, we recommend creating the `/usr/lib/sota/sota.env` file even if you aren't going to use it. 
The file should be owned by root, with `600` permissions. -=== Secondary ECUs +== Secondary ECUs -The debian package ships with a default secondary ECU configured. This acts like a dummy device, dropping whatever file you send it into `/tmp/demo-virtual-secondary/firmware.bin`. +The debian package ships with a default Secondary ECU configured. This acts like a dummy device, dropping whatever file you send it into `/tmp/demo-virtual-secondary/firmware.bin`. -=== Building the debian package +== Building the debian package After following the main build setup steps, just `make package` instead of `make` to create a debian package from your current branch, for example: @@ -39,6 +39,6 @@ cmake -DCMAKE_BUILD_TYPE=Debug -DBUILD_DEB=ON .. make package ---- -=== Making a Release on github +== Making a Release on github The process for releasing a new version of aktualizr and updating the documentation is described xref:release-process.adoc[here]. diff --git a/docs/ota-client-guide/modules/ROOT/pages/debugging-tips.adoc b/docs/ota-client-guide/modules/ROOT/pages/debugging-tips.adoc index 534ef16b62..593729cb47 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/debugging-tips.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/debugging-tips.adoc @@ -25,7 +25,7 @@ Try the scripts available in the link:{aktualizr-github-url}/tests/ostree-script == Inspect stored info with aktualizr-info -The aktualizr-info tool can be used to dump information stored in the libaktualizr database. By default, it displays basic information such as storage type, device ID, primary ECU serial and hardware ID and provisioning status. Additional information can be requested with link:{aktualizr-github-url}/src/aktualizr_info/main.cc[various command line parameters]. +The aktualizr-info tool can be used to dump information stored in the libaktualizr database. By default, it displays basic information such as storage type, device ID, Primary ECU serial and hardware ID and provisioning status. 
Additional information can be requested with link:{aktualizr-github-url}/src/aktualizr_info/main.cc[various command line parameters]. == Valgrind and gdb @@ -74,7 +74,7 @@ uptane-generator generate Then, serve the generated directory using a web server such as the link:{aktualizr-github-url}/tests/fake_http_server/fake_test_server.py[fake test server]. -For more information about using uptane-generator, see xref:uptane-generator.adoc[uptane-generator.adoc]. +For more information about using uptane-generator, see the xref:uptane-generator.adoc[uptane-generator article]. Here is an example configuration for nginx: @@ -98,7 +98,7 @@ server { == Inject faults -See xref:fault-injection.adoc[fault-injection.adoc] +See the xref:fault-injection.adoc[fault injection article] for more information. == Developing and debugging with an OpenEmbedded system @@ -160,7 +160,7 @@ $ gdb aktualizr In CLion the remote debugger is configured as follows: -image::clion-debugger.png[CLion GDB configuration] +image::img::clion-debugger.png[CLion GDB configuration] It is also possible to run it inside valgrind: diff --git a/docs/ota-client-guide/modules/ROOT/pages/deploy-checklist.adoc b/docs/ota-client-guide/modules/ROOT/pages/deploy-checklist.adoc index 6f33bfbbb7..a38ab36547 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/deploy-checklist.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/deploy-checklist.adoc @@ -8,43 +8,43 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -OTA Connect is designed to integrate easily into development workflows: you build your image, push it, set auto-updates for some of your test-bench devices, and so on. But once you're ready to move from testing into production, you will likely want to do a few things differently. +OTA Connect is designed to integrate easily into development workflows: you build your image, push it, set auto-updates for some of your test-bench devices, and so on. 
But once you're ready to move from testing into production, you will likely want to do a few things differently.
 
Here is a checklist for all the tasks you should consider when moving to production:
 
[cols="2,5a,2a",options="header"]
|====================
| Task  | Summary | Documentation
-|**Register the root certificate for your fleet ** |
-* If you followed our recommendations, you should have accounts for development, testing and production.
-** If you also followed our recommendation to use device-credential povisioning, you need to register your fleet root certificate with your production account
+|**Register the root certificate for your fleet ** |
+* If you followed our recommendations, you should have separate environments for development, testing, and production.
+** If you also followed our recommendation to use device-credential provisioning, you need to register your Fleet Root certificate with your production environment.
 
-* You might have already registered a self-signed root certificate with your test account.
+* You might have already registered a self-signed root certificate with your test environment.
+
-However, regardless of the type of certficate that you use, you'll need to register a new certificate with your *production* account. |
+However, regardless of the type of certificate that you use, you'll need to register a new certificate with your *production* environment. |
* xref:client-provisioning-methods.adoc[Device provisioning methods]
* xref:provide-root-cert.adoc[Register the Root Certificate for your Fleet]
 
-|**Generate and install final device certs** |
-* Once you have your final fleet root certificate, you can use it to generate and sign device certificates.
+|**Generate, sign, and install production device certs** |
+* Once you have your production Fleet Root CA, you can use it to sign device certificates.
+
-You can then automate the process of installing device certificates on your devices.
+You can then automate the process of either generating the device certificates on your devices and getting them signed via PKCS#10 CSR, or of generating and signing the keys and certs externally, and then installing them into a secure place on each device. * We can’t tell you exactly how to automate this process, but you can use the commands from our documentation as a guideline. -| +| * xref:generate-devicecert.adoc[Generate a device certificate] * xref:enable-device-cred-provisioning.adoc[Enable device-credential provisioning and install device certificates] -|**Rotate production keys** | -* In line with our security concept, We recommend that you sign disk images with secure, offline keys. +|**Rotate production keys** | +* In line with our security concept, we recommend that you sign the software version with secure, offline keys. -* Even if you've done this already for with a test account, you need to do it again with a `credentials.zip` from your production account. +* Even if you've done this already in a test environment, you need to do it again with a `credentials.zip` file from your production environment. -* You should keep these keys on a secure storage medium such as a link:https://www.yubico.com/[YubiKey]. You would only plug in your YubiKey when you need to sign metadata on your local computer.| xref:rotating-signing-keys.adoc[Manage keys for software metadata] +* You should keep these keys on a secure storage medium such as a link:https://www.yubico.com/[YubiKey]. Only plug in your YubiKey when you need to sign metadata on your local computer.| xref:rotating-signing-keys.adoc[Manage keys for software metadata] -|**Transfer disk images to your production repository** | -* When you're ready to deploy your software to production, you'll need to move all approved disk images from the software repository in your testing account to the one in your production account. 
| xref:cross-deploy-images.adoc[Transfer software to another repository]
-|**Create production-ready client configuration** |
+|**Transfer disk images to your production repository** |
+* When you're ready to deploy your software to production, you'll need to move all approved disk images from the software repository in your testing environment to the one in your production environment. | xref:cross-deploy-images.adoc[Transfer software to another repository]
+|**Create production-ready client configuration** |
* You'll need to update the configuration for aktualizr or libaktualizr.
+
-Settings that are convenient for testing, such as small polling invervals, are not suitable for production and should be changed. | xref:recommended-clientconfig.adoc[Recommended client configurations]
-|====================
\ No newline at end of file
+Settings that are convenient for testing, such as small polling intervals, are not suitable for production and should be changed. | xref:recommended-clientconfig.adoc[Recommended client configurations]
+|====================
diff --git a/docs/ota-client-guide/modules/ROOT/pages/developer-tools.adoc b/docs/ota-client-guide/modules/ROOT/pages/developer-tools.adoc
index 009db8809c..760324808c 100644
--- a/docs/ota-client-guide/modules/ROOT/pages/developer-tools.adoc
+++ b/docs/ota-client-guide/modules/ROOT/pages/developer-tools.adoc
@@ -45,7 +45,7 @@ The meta-updater Yocto layer contains "recipes" related to software updating. Yo
 
Later in this guide, we go into xref:yocto.adoc[more detail about how Yocto works].
 
-| *OSTree* | OSTree is a tool for managing Linux filesystems, and delivers atomic updates with rollback capability and built-in delta support. OSTree has several advantages over traditional dual-bank systems, but the most important one is that it minimizes network bandwidth and data storage footprint by sharing files with the same contents across file system deployments.
+| *OSTree* | OSTree is a tool for managing Linux filesystems, and delivers atomic updates with rollback capability and built-in delta support. OSTree has several advantages over traditional dual-bank systems, but the most important one is that it minimizes data usage, both in transit and in storage, by sharing files with the same contents across file system deployments. You probably won't have to use the OSTree CLI often, but it's handy for troubleshooting build issues. diff --git a/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-steps.adoc b/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-steps.adoc index b333f63aaa..98e21f80a0 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-steps.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-steps.adoc @@ -24,9 +24,9 @@ include::partial$how-prov-with-device-cred.adoc[] * *xref:provide-root-cert.adoc[Provide us with the root certificate for your fleet]* + We'll need to register this certificate for your account so that OTA Connect can validate your device certificates. -* *xref:install-device-certs.adoc[Install your device certificates on devices]* +* *xref:enable-device-cred-provisioning.adoc[Install your device certificates on devices]* + The installation process happens outside of OTA Connect but we can give you some pointers on how to set up this process. -* *xref:enable-device-cred-provisioning.adoc[Configure the OTA Connect client to use your device certificates for provisioning]* +* *Configure the OTA Connect client to use your device certificates for provisioning* + -We show you how to do this with or without an HSM. \ No newline at end of file +If you followed the example in the previous step, the client will already be configured. For custom setups, you may need to do some xref:aktualizr-config-options.adoc[manual configuration]. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-teststeps.adoc b/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-teststeps.adoc deleted file mode 100644 index aa70bd5518..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/device-cred-prov-teststeps.adoc +++ /dev/null @@ -1,31 +0,0 @@ -= Text Device-credential Provisioning -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -Although shared-credential provisioning is useful for evaluating OTA Connect, we don't recommend that you use it in production. - -If you want to test provisioning properly, you should provision devices with their own certificates. In a production scenario, you'll need to automate the process of provisioning devices with their own certificates, but for testing you can provision devices manually. - -The following major steps show you how to provision test devices with device certificates: - -* xref:generate-selfsigned-root.adoc[Generate a test root certificate]. -+ -If you don't yet have your fleet's root certificate, we show you how to generate one yourself for testing. - -* xref:provide-testroot-cert.adoc[Provide us with your test root certificate] -+ -We'll need to register your test root certificate with a test account, so that the OTA Connect server can verify your test device certificates. - -* xref:generatetest-devicecert.adoc[Generate and sign a test device certificate] -+ -Once you've generated a test root certificate, you can use it to sign a test device certificate. - -* xref:enable-device-cred-provtest.adoc[Enable and install the device certificate] -+ -We show you how to enable device-credential provisioning and install a device certificate on a test device. 
Once you've provisioned test devices with certificates, they can authenticate with the OTA Connect server. \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/device-monitoring-with-zabbix.adoc b/docs/ota-client-guide/modules/ROOT/pages/device-monitoring-with-zabbix.adoc new file mode 100644 index 0000000000..96f4e32ac2 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/device-monitoring-with-zabbix.adoc @@ -0,0 +1,377 @@ += Device monitoring with Zabbix + +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== + +:attachmentsdir: ../assets/attachments +endif::[] + +This document gives a step by step guide on how to set up, build and run a Zabbix server and agent in order to monitor an Aktualizr update client running on QEMU and collect information about system resource usage, like memory usage and CPU utilization. + +You might want to use this guide as a template for configuring zabbix monitoring on a real device, to do performance testing on your real hardware. Or, if you are contributing to aktualizr development or developing your own client based on libaktualizr, you might want to use a setup like this to profile the performance impact of your changes. + +For more information about Zabbix in general, please refer to the https://www.zabbix.com/documentation/4.0/[official documentation]. + +== Install and setup Zabbix server on Ubuntu 16/18 + +. Install Apache, MySQL, and PHP. ++ +---- +sudo apt-get update +sudo apt-get install apache2 libapache2-mod-php +sudo apt-get install mysql-server +sudo apt-get install php php-mbstring php-gd php-xml php-bcmath php-ldap php-mysql +---- + +. Get the current time zone. ++ +---- +timedatectl status | grep "Time zone" +---- + +. In the PHP configuration file, update the time zone. 
++ +Example of the configuration file: `/etc/php/7.2/apache2/php.ini` ++ +---- +... +[Date] +; http://php.net/date.timezone +date.timezone = 'Europe/Berlin' +... +---- + +. Depending on the Ubuntu version, enable the required repository. +** Ubuntu 18.04 LTS (Bionic) ++ +---- +wget https://repo.zabbix.com/zabbix/4.0/ubuntu/pool/main/z/zabbix-release/zabbix-release_4.0-3+bionic_all.deb +sudo dpkg -i zabbix-release_4.0-3+bionic_all.deb +---- + +** Ubuntu 16.04 LTS (Xenial) ++ +---- +$ wget https://repo.zabbix.com/zabbix/4.0/ubuntu/pool/main/z/zabbix-release/zabbix-release_4.0-3+xenial_all.deb +$ sudo dpkg -i zabbix-release_4.0-3+xenial_all.deb +---- + +. Install Zabbix server. ++ +---- +sudo apt-get update +sudo apt-get install zabbix-server-mysql zabbix-frontend-php zabbix-agent +---- + +. Update the root password. ++ +---- +sudo su +mysql -u root +mysql> UPDATE mysql.user SET authentication_string=PASSWORD('new-password') WHERE USER='root'; +mysql> FLUSH PRIVILEGES; +mysql> exit; +exit; +---- + +. Create a database (DB) schema for Zabbix server. ++ +---- +sudo mysql -u root -p +mysql> CREATE DATABASE zabbixdb character set utf8 collate utf8_bin; +mysql> CREATE USER 'zabbix'@'localhost' IDENTIFIED BY 'zabbix'; +mysql> GRANT ALL PRIVILEGES ON zabbixdb.* TO 'zabbix'@'localhost' WITH GRANT OPTION; +mysql> FLUSH PRIVILEGES; +cd /usr/share/doc/zabbix-server-mysql +zcat create.sql.gz | mysql -u zabbix -p zabbixdb +---- + +. Edit the Zabbix configuration file. ++ +Example of the configuration file: `/etc/zabbix/zabbix_server.conf` ++ +---- +DBHost=localhost +DBName=zabbixdb +DBUser=zabbix +DBPassword=zabbix +---- + +. Restart Apache and Zabbix. ++ +---- +sudo systemctl restart apache2 +sudo systemctl restart zabbix-server +---- + +. Go to the http://localhost/zabbix/[Zabbix localhost] and complete the installation wizard: +.. On the **Welcome** page, click **Next Step**. +.. Check if all the prerequisites are met, and then click **Next Step**. +.. 
Specify the DB connection details: +... From **Database Type**, select **MySQL**. +... In **Database Host**, make sure that **localhost** is specified. +... In **Database Port**, make sure that **0** is specified. +... In **Database Name**, specify **zabbixdb**. +... In **User**, specify **zabbix**. +... In **Password**, specify **zabbix**. +.. On the **Zabbix Server Details** page, click **Next Step**. +.. Review the settings summary, and then click **Next Step**. +.. Download the configuration file and place it in the same subdirectory to which you copied the Zabbix PHP files. +.. Finish the installation. ++ +To sign in to Zabbix, use the following default user name and password: ++ +---- +user: Admin +pass: zabbix +---- + +== Build an OTA image for QEMU + +. Clone the manifest file for the quickstart project and download the basic Yocto layers. ++ +For instructions, see https://docs.ota.here.com/getstarted/dev/qemuvirtualbox.html#_create_your_yocto_build_environment[the related section] in the Get Started guide. +. To set up the Yocto environment, do one of the following: +** Build a QEMU non-OSTree image. ++ +---- +source meta-updater/scripts/envsetup.sh qemux86-64 poky +---- ++ +NOTE: It is important to specify `SOTA_HARDWARE_ID` in your conf/local.conf for non-OSTree case in order to separate updates for OSTree and non-OSTree cases. ++ +Edit your conf/local.conf file and add `SOTA_HARDWARE_ID`, provide your desired hardware id, see the example below. ++ +---- +SOTA_HARDWARE_ID="qemux86-64-non-ostree" +---- + +** Build a QEMU image with a default package manager OSTree. ++ +---- +source meta-updater/scripts/envsetup.sh qemux86-64 poky-sota-systemd +---- +. 
Open your conf/local.conf file and, at the end of the file, add the following lines: ++ +---- +IMAGE_INSTALL_append += "procps" +IMAGE_INSTALL_append += " zabbix" +SOTA_COMM_CONF_ZABBIX_SERVER="10.0.2.2" <1> +SOTA_COMM_CONF_ZABBIX_SERVERACTIVE="10.0.2.2" <1> +---- +<1> Substitute the IP address of the system the Zabbix server is running on, as needed. Note that the IP address needs to be hard-coded, so if you are running this on a real device, it is recommended to run the server on something with a static IP. ++ +NOTE: The default host name of the QEMU image is `qemux86-64`. + + +== Run a QEMU image + +. Depending on the QEMU image that you built, to make the Zabbix agent visible for the server, do one of the following: + +** Run the QEMU non-OSTree image. ++ +---- +../meta-updater/scripts/run-qemu-ota --uboot-enable=no --host-forward="tcp:0.0.0.0:10555-:10050" +---- + +** Run the QEMU image created with the default `OSTree` package manager. ++ +---- +../meta-updater/scripts/run-qemu-ota --overlay mydevice.cow --host-forward="tcp:0.0.0.0:10555-:10050" +---- + +. Install the `zabbix_get` util. ++ +---- +sudo apt-get install zabbix-get +---- ++ +. Check if the Zabbix agent is accessible. ++ +---- +zabbix_get -s localhost -p 10555 -k agent.ping +---- ++ +If `zabbix-agent` is accessible, the command returns `1` + + +== Run a QEMU image in the background mode + +Follow this Systemd Unit example: +---- +[Unit] +Description=QEMU Zabbix Test agent +After=network.target networkd.service ntpd.service +[Service] +Type=simple +WorkingDirectory= +ExecStart=/meta-updater/scripts/run-qemu-ota --dir /tmp/deploy/images --uboot-enable=no --host-forward="tcp:0.0.0.0:10555-:10050" --no-gui +Restart=always +RestartSec=5 +LimitNOFILE=10000 +[Install] +WantedBy=multi-user.target +---- + + +== Add a template and configure a host for Zabbix server + +=== Import a Zabbix template +link:{attachmentsdir}/aktualizr-monitoring-zabbix-template.xml[Download Zabbix template] + +. 
Go to the Zabbix server dashboard (localhost/zabbix). +. Go to **Configuration** > **Templates**. +. Click **Import**, and then select **aktualizr-monitoring-zabbix-template.xml**. +. Select **Create New / Screens**. +. Click **Import**. + +=== Add a host for monitoring + +. Go to **Configuration** > **Host**. +. Click **Create Host**. +. Specify the hostname. ++ +NOTE: The hostname must be the same as the one you configured in the <> section. + +. In the **Groups** section, click **Select**, and then select **Template/Applications**. +. In the **Agent interfaces** section, provide the IP address and desired port. ++ +NOTE: The IP address for the QEMU is the localhost (127.0.0.1). Also, use the port that you provided in the <> section. +. On the **Templates** tab, in the **Link New Templates** section, click *Select*, and then select the **Aktualizr Client** template. +. Click the **Add** hyperlink, and then click the **Add** button. +. Go to **Monitoring** > **Graphs**. +. On the **Group** menu, select **All** **Host qemux86-64** and **Graph aktualizr.rss.memory.usage.graph**. + +== Zabbix agent hostname +By default, the Zabbix-agent hostname is configured to `Hostname=Zabbix server`; this hostname is used in the Zabbix-agent's requests to the server. + +If you want to be able to monitor more than one device, each device must have a unique hostname in order to be +recognized on the Zabbix server. There are two possible ways to achieve this goal: + +. Manually configure each individual device by setting Hostname in the zabbix-agent.conf file. +. Create a simple service with a script that makes this configuration at boot time. + +=== Manual configuration: +1. Connect to your device using ssh +2. 
Change the value of the Hostname variable in `/etc/zabbix-agent.conf` to the desired name, for example `Hostname=my_awesome_device` + +=== Set Zabbix hostname at boot time applying aktualizr DEVICE_ID: +In order to automate this process, we have to create our own recipe which will automatically install the necessary files into our image. + +If you have your own meta-layer, you can put the new recipe into the `recipes-extended` folder or, for testing purposes, +you can use the `meta-updater/recipes-extended` folder as well. + +1. Make a new directory in `recipes-extended` with the name `zabbixhostname` and create all necessary files and subfolders. ++ +---- +zabbixhostname/ +├── files +│ ├── zabbix-hostname.service +│ └── zabbix-hostname.sh +└── zabbix-hostname.bb +---- + +2. Edit each file in the `zabbixhostname` folder and copy the content below for each individual file respectively. ++ +.zabbix-hostname.bb +[source, bash] +---- +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302" +SRC_URI = " file://zabbix-hostname.service \ + file://zabbix-hostname.sh \ +" +REQUIRED_DISTRO_FEATURES= "systemd" +inherit systemd +SYSTEMD_PACKAGES = "${PN}" +SYSTEMD_SERVICE_${PN} = "zabbix-hostname.service" +do_install_append() { + #install -d ${D}${systemd_unitdir} + install -d ${D}${systemd_unitdir}/system + install -m 0644 ${WORKDIR}/zabbix-hostname.service ${D}${systemd_unitdir}/system/zabbix-hostname.service + install -d ${D}${bindir} + install -m 0755 ${WORKDIR}/zabbix-hostname.sh ${D}${bindir}/zabbix-hostname.sh +} +FILES_${PN} += " ${systemd_unitdir}/system/zabbix-hostname.service \ + ${bindir}/zabbix-hostname.sh \ +" +---- ++ +.files/zabbix-hostname.service +---- +[Unit] +Description=Update Zabbix Hostname +After=syslog.target network.target aktualizr.service +[Service] +Type=oneshot +User=root +ExecStart=/usr/bin/zabbix-hostname.sh /usr/bin/aktualizr-info +RemainAfterExit=false +StandardOutput=journal +[Install] 
WantedBy=multi-user.target +---- ++ +.files/zabbix-hostname.sh +[source, bash] +---- +#!/bin/bash +S=1 +$1 > /tmp/aktualizr-info-tmp +while read -r line; do + if [[ "${line:0:10}" == "Device ID:" ]]; then + grep -Fxq "Hostname=${line:11}" /etc/zabbix_agentd.conf + if [[ $? -eq 1 ]]; then + sed -i "s:^Hostname=Zabbix server:Hostname=${line:11}:g" /etc/zabbix_agentd.conf + systemctl restart zabbix-agent.service + fi + S=0 + fi +done < "/tmp/aktualizr-info-tmp" +rm /tmp/aktualizr-info-tmp +if [[ "${S}" -ne 0 ]]; then + echo "ERROR: Reading aktualizr info failed. Restart service!" + sleep 20 + systemctl restart zabbix-hostname.service + exit 1 +fi +---- + +3. Install the `zabbixhostname` recipe by adding it to your `conf/local.conf` file ++ +---- +IMAGE_INSTALL_append += "zabbixhostname" +---- + +4. Bitbake an image ++ +---- +bitbake core-image-minimal +---- + +== Zabbix User Parameters (creating custom key) +Please refer to the official https://www.zabbix.com/documentation/4.0/manual/config/items/userparameters[zabbix documentation] for a complete description. + +=== Monitoring number of threads created by the process: +1. Connect to the device using SSH ++ +QEMU: ++ +---- +ssh -o StrictHostKeyChecking=no root@localhost -p 2222 +---- +2. Add the user parameter to the Zabbix config file ++ +---- +echo 'UserParameter=aktualizr.threads.count,ps huH p $(pgrep aktualizr) | wc -l' >> /etc/zabbix_agentd.conf +---- +3. 
Restart the Zabbix agent service ++ +---- +systemctl restart zabbix-agent +---- + diff --git a/docs/ota-client-guide/modules/ROOT/pages/ecu_events.adoc b/docs/ota-client-guide/modules/ROOT/pages/ecu_events.adoc index 8de2a3c429..197f03fabe 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/ecu_events.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/ecu_events.adoc @@ -7,6 +7,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +// AB: This page is currently out of date and will be temporarily hidden in the developer guide portal to be updated later and connected to the API documentation. + :aktualizr-github-url: https://github.com/advancedtelematic/aktualizr/tree/master This is a technical document describing the format of the events reported to the back-end by aktualizr during an ongoing update installation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc b/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc index f85f87271e..e779bc905b 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc @@ -1,4 +1,4 @@ -= Enable device-credential provisioning and install device certificates += Enable device-credential provisioning and install device certificates ifdef::env-github[] [NOTE] @@ -12,16 +12,16 @@ endif::[] Once you're ready to provision devices in production, you need to build disk images that are configured to use device-credential provisioning. -After you have flashed those images to devices, you boot the image and install the device certicate for each device. +After you have flashed those images to devices, you boot the image and install the device certificate for each device. How you get the certificate on the device is up to you: * You could have your HSM generate a private key and certificate directly on the device. 
+ -You woud then sign the certificate with your fleet root key. +You would then use your Fleet Root CA to sign the device's certificate, for example using a PKCS#10 request. + ** We don't have documentation on how to do this, since the method is different for each HSM model. -* You can also generate the device certificates and private keys on your development computer and copy them over to the device. +* You can also generate the device certificates and private keys on your development computer and copy them over to the device. ** For these instructions, we'll assume you are using this latter method. == HSM considerations @@ -55,9 +55,9 @@ IMAGE_INSTALL_append = " softhsm-testtoken dropbear " + [NOTE] ==== -The line `IMAGE_INSTALL_append` installs optional software to your device. +The line `IMAGE_INSTALL_append` installs optional software to your device. -* The option `dropbear` installs the link:https://matt.ucc.asn.au/dropbear/dropbear.html[Dropbear] ssh server. +* The option `dropbear` installs the link:https://matt.ucc.asn.au/dropbear/dropbear.html[Dropbear] ssh server. + You'll need to ssh into the device to copy the certificates to the device's filesystem. * The option `softhsm-testtoken` installs SoftHSM to so that you can easily test HSM interactions. @@ -76,11 +76,11 @@ scp -P 2222 autoprov.url root@localhost:/var/sota/import/gateway.url + [NOTE] ==== -You might remember that `credentials.zip` contains a provisioning key shared-credential provisioning. In this case we just need the `autoprov.url` file inside `credentials.zip`. This file contains the URL of your device gateway which is specific to your account. +You might remember that `credentials.zip` normally contains a provisioning key for shared-credential provisioning. In this case, we just need the `autoprov.url` file inside `credentials.zip`. This file contains the URL of your device gateway, which is specific to your account. ==== -. 
Copy the device credentials and device gateway root CA certificate to the device. +. Copy the device credentials and device gateway root CA certificate to the device. + -[source,sh,subs="attributes"] +[source,sh] ---- export device_dir=path/to/device-creds/dir scp -P 2222 -pr ${device_dir} root@localhost:/var/sota/import @@ -99,4 +99,4 @@ Once the certificates have copied, the following chain of events should occur: + .. The server authenticates the client device by verifying that the client's certificate was signed by the root CA private key that was uploaded in step 2. .. The client device authenticates the server by verifying that the server's certificate was signed by the server's internal root CA private key. -.. The device is provisioned and appears online in the web UI. \ No newline at end of file +.. The device is provisioned and appears online in the web UI. diff --git a/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provtest.adoc b/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provtest.adoc deleted file mode 100644 index ceb1710deb..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/enable-device-cred-provtest.adoc +++ /dev/null @@ -1,134 +0,0 @@ -= Enable device-credential provisioning and install device certificates -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -//MC: This is a copy of the topic "enable-device-cred-provisioning.adoc" but intended for the "test" use case. Need to use more includes to reduce redundancy. - -If you've followed our recommendation to use device-credential provisioning, you'll need to test how it works in your environment. You start by building disk images that are configured to use device-credential provisioning. 
- -After you have flashed those images to devices, you boot the image and install the device certicate for each device. You can install the certificate to the device's fileystem or use an HSM. - -== Enable and install _without_ an HSM - -You don't need an HSM to provision with device credentials, but we recommend that you use one. If you want to do without an HSM for now, use this procedure. - -To enable device-credential provisioning and install device certificates _without_ an HSM, follow these steps: :: - -. Add the following lines to your `conf/local.conf`: -+ ----- -SOTA_CLIENT_PROV = "aktualizr-device-prov" -SOTA_DEPLOY_CREDENTIALS = "0" -SOTA_PACKED_CREDENTIALS = "/path/to/your/credentials.zip" -IMAGE_INSTALL_append = " dropbear " ----- -+ -[NOTE] -==== -The line `IMAGE_INSTALL_append = " dropbear "` ensures that an ssh server is installed on the image. You'll need to ssh into the device to copy the certificates to the device's filesystem. -==== -. Build a standard image using bitbake. -. Boot the image. -. Run the following commands to tell the device what server URL to connect to: -+ -[source,sh,subs="attributes"] ----- -unzip credentials.zip autoprov.url -scp -P 2222 autoprov.url root@localhost:/var/sota/import/gateway.url ----- -+ -[NOTE] -==== -You might remember that `credentials.zip` contains a provisioning key shared-credential provisioning. In this case we just need the `autoprov.url` file inside `credentials.zip`. This file contains the URL of your device gateway which is specific to your account. -==== -. Copy the device credentials and device gateway root CA certificate to the device. -+ -[source,sh,subs="attributes"] ----- -export device_dir=path/to/device/dir -scp -P 2222 -pr ${device_dir} root@localhost:/var/sota/import ----- -+ -[NOTE] -==== -Replace `path/to/device/dir` with the device directory that you noted when xref:generatetest-devicecert.adoc[generating the device certificate]. -==== -+ -. 
_(Optional)_ When the copy operation has completed, ssh into your device and check the aktualizr log output with the following `systemd` command: -+ -`journalctl -f -u aktualizr` -+ -Once the certificates have copied, the following chain of events should occur: -+ -.. The server authenticates the client device by verifying that the client's certificate was signed by the root CA private key that was uploaded in step 2. -.. The client device authenticates the server by verifying that the server's certificate was signed by the server's internal root CA private key. -.. The device is provisioned and appears online in the web UI. - - -== Enable and install _with_ an HSM - -As described in the xref:index.adoc[introduction], it's a good idea to use a Hardware Security Model (HSM) to hold potentially sensitive device credentials. - -The following procedure describes how to use QEMU and link:https://www.opendnssec.org/softhsm/[SoftHSM] to simulate a device with an HSM. - -However, the procedure for your HSM will probably be different. We've provided these instructions as a basic guide to how this provisioning method works but you'll need to make further changes on your own. For example, you'll probably need to adapt your BSP so that aktualizr can access the keys from your HSM. - -To enable device-credential provisioning and install device certificates _with_ an HSM, follow these steps: :: - -. Add the following lines to your `conf/local.conf`: -+ ----- -SOTA_CLIENT_FEATURES = "hsm" -SOTA_CLIENT_PROV = "aktualizr-device-prov-hsm" -SOTA_DEPLOY_CREDENTIALS = "0" -IMAGE_INSTALL_append = " softhsm-testtoken dropbear " ----- -+ -[NOTE] -==== -The line `IMAGE_INSTALL_append = " softhsm-testtoken dropbear "` ensures that softhsm and an ssh server are installed on the image. You'll need to ssh into the device to copy the certificates to the hsm. -==== -. Build a standard image using bitbake. -. Boot the image. -. 
Run the following commands to tell the device what server URL to connect to: -+ -[source,sh,subs="attributes"] ----- -unzip credentials.zip autoprov.url -scp -P 2222 autoprov.url root@localhost:/var/sota/import/gateway.url ----- -+ -[NOTE] -==== -You might remember that `credentials.zip` contains a provisioning key shared-credential provisioning. In this case we just need the `autoprov.url` file inside `credentials.zip`. This file contains the URL of your device gateway which is specific to your account. -==== -. Copy the device credentials and device gateway root CA certificate to the device's HSM. -+ -[source,sh,subs="attributes"] ----- -export device_dir=path/to/device/dir -scp -P 2222 -pr ${device_dir} root@localhost:/var/sota/import ----- -+ -[NOTE] -==== -Replace `path/to/device/dir` with the device directory that you noted when xref:generatetest-devicecert.adoc[generating the device certificate]. - -For the QEMU simulated HSM, replace `path/to/device/dir` with the credentials directory of the relevant device. -==== -+ -. _(Optional)_ When the copy operation has completed, ssh into your device and check the aktualizr log output with the following `systemd` command: -+ -`journalctl -f -u aktualizr` -+ -Once the certificates have copied, the following chain of events should occur: -+ -.. The server authenticates the client device by verifying that the client's certificate was signed by the root CA private key that was uploaded in step 2. -.. The client device authenticates the server by verifying that the server's certificate was signed by the server's internal root CA private key. -.. The device is provisioned and appears online in the web UI. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/evaluation-to-prod.adoc b/docs/ota-client-guide/modules/ROOT/pages/evaluation-to-prod.adoc index 031389ffdb..2353dfb991 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/evaluation-to-prod.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/evaluation-to-prod.adoc @@ -8,19 +8,26 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -The procedures to deploy OTA Connect in production are a little more complex than the basic xref:dev@getstarted::index.adoc[Get Started] procedures. +The procedures to deploy OTA Connect in a realistic production scenario are a little more complex than the basic xref:getstarted::index.adoc[Get Started] procedures. However, you don't need to commit the resources for a full production roll-out right at the beginning of the process. -That's why it's better work in phases when setting up your OTA Connect in your organization. We recommend that you work in three main phases: +That's why it's better to work in phases when setting up OTA Connect in your organization. We recommend that you work in three main phases: . __Evaluate OTA Connect__ -. __Build your own OTA-enabled solution__ -. __Deploy your own OTA-enabled solution__ +. __Integrate OTA Connect__ +. __Deploy your OTA-enabled solution__ -This guide contains chapters to guide you through each phase and the following sections give you an introduction the phased approach: +No matter which phase you're in, there are 4 basic tasks you'll need to do: + +. Build device images with an aktualizr-based client on it. +. Sign and upload the device images. +. Provision devices with authentication credentials for your account. +. Send updated images to some or all of your devices. + +This guide contains chapters to guide you through each phase. The sections below give you an introduction to the phased approach. 
== Evaluate OTA Connect -During evaluation, you should focus on testing the basic update functionality to make sure that you understand how it works. At this stage you don't need to think about customization or production-level security. +During the evaluation phase, you should focus on testing the basic update functionality to make sure that you understand how it works. At this stage, you don't need to think about customization or production-level security. You'll use basic, minimal device images for test boards, or even just simulate devices, and OTA Connect will handle the software signing on the server. For provisioning, you'll use a shared registration key for all your devices. === Recommendations @@ -28,15 +35,17 @@ Here are our recommendations for xref:intro-evaluate.adoc[evaluating OTA connect include::partial$recommended-steps.adoc[tags=evaluate-steps] -== Build your own OTA-enabled solution +== Integrate OTA Connect + +In this phase, you'll move more towards a realistic production workflow. This will take you into some more complex tasks, like generating unique device keys for registering devices, signed with your own certificate authority. You also might start thinking about using libaktualizr to customize your update flow--for example, to integrate with a user interface, or to update Secondary ECUs. You'll also take your software signing keys offline, and sign software yourself before uploading it. === Recommendations -Here are our recommendations for xref:intro-prep.adoc[integrating OTA Connect into your production environment]: +Here are our recommendations for xref:intro-prep.adoc[integrating OTA Connect into your production workflows]: include::partial$recommended-steps.adoc[tags=integrate-steps] -== Deploy your OTA-enabled solution +== Deploy a fully production-ready OTA solution You've done your testing and now you're ready to xref:intro-prod.adoc[use OTA Connect in production]. 
You need to make sure that your device provisioning process is production-ready and that your software is available in your production account. diff --git a/docs/ota-client-guide/modules/ROOT/pages/fault-injection.adoc b/docs/ota-client-guide/modules/ROOT/pages/fault-injection.adoc index 79f307e696..717aa4eb83 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/fault-injection.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/fault-injection.adoc @@ -7,8 +7,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -ifdef::env-github[:simulatelink: link:https://docs.ota.here.com/ota-client/dev/simulate-device-basic.html[Simulate a device without building a disk image]] -ifndef::env-github[:simulatelink: xref:dev@getstarted::simulate-device-basic.adoc[Simulate a device without building a disk image]] +ifdef::env-github[:simulatelink: link:https://docs.ota.here.com/ota-client/latest/simulate-device-basic.html[Simulate a device without building a disk image]] +ifndef::env-github[:simulatelink: xref:simulate-device-basic.adoc[Simulate a device without building a disk image]] To test the system in adverse conditions, it can be useful to make aktualizr fail in a controlled fashion. @@ -39,10 +39,13 @@ Usage is as follow: Please try to keep this list up-to-date when inserting/removing fail points. 
-- `fake_package_download`: force the fake package manager download to fail, optionally with a failure code supplied via `failinfo` -- `fake_package_install`: force the fake package manager installation to fail, optionally with a failure code supplied via `failinfo` -- `secondary_install_xxx` (xxx is a virtual secondary ecu id): force a virtual secondary installation to fail, optionally with a failure code supplied via `failinfo` +- `fake_package_download`: force the fake package manager download to fail +- `fake_package_install`: force the fake package manager installation to fail - `fake_install_finalization_failure`: force the fake package manager installation finalization to fail +- `secondary_putmetadata`: force virtual Secondary metadata verification to fail +- `secondary_putroot`: force virtual Secondary Root rotation to fail +- `secondary_sendFirmware_xxx` (xxx is a virtual Secondary ECU ID): force a virtual Secondary firmware send to fail +- `secondary_install_xxx` (xxx is a virtual Secondary ECU ID): force a virtual Secondary installation to fail == Use in unit tests diff --git a/docs/ota-client-guide/modules/ROOT/pages/finding-unsigned-metadata.adoc b/docs/ota-client-guide/modules/ROOT/pages/finding-unsigned-metadata.adoc new file mode 100644 index 0000000000..e8c3a1b729 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/finding-unsigned-metadata.adoc @@ -0,0 +1,27 @@ += Find the unsigned Root and Targets metadata +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +If you want to use your own PKI, you need to know where in your local repository you can find the metadata that you want to sign. +It may be the `root.json` or `targets.json` files. You can find both files in the `tuf//roles/unsigned` folder. 
+ +NOTE: is the name you specified when you initialized your repository using `garage-sign init`. + +If the `unsigned/` folder is empty, you need to pull the metadata files: + +* To pull the unsigned `root.json` file, use `garage-sign root pull`. +* To pull the unsigned `targets.json` file, use `garage-sign targets pull`. + +If you have not created any targets, to create the unsigned `targets.json` file, use `garage-sign targets init`. + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. + +== Generate Root and Targets metadata in a canonical form + +To generate unsigned metadata in a canonical form, use the `garage-sign root get-unsigned` and `garage-sign targets get-unsigned` commands +for the unsigned `root.json` and `targets.json` files respectively. The files that you get are stored in the `unsigned/` folder. diff --git a/docs/ota-client-guide/modules/ROOT/pages/garage-sign-reference.adoc b/docs/ota-client-guide/modules/ROOT/pages/garage-sign-reference.adoc new file mode 100644 index 0000000000..9e2291dee8 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/garage-sign-reference.adoc @@ -0,0 +1,405 @@ += Garage-sign commands and options +:type: The type of key that you want to create: Ed25519 or RSA. +:keysize: The length of the key that you want to create, in bits. RSA 2048/4096 and Ed25519 are supported. +:key-name-text: The base filename for your keys. Generated files will be named `.sec` and `.pub`. +:keys-path: The path where this executable will look for keys. By default, it is the `user-keys` directory in the directory that you specified with the `--home-dir` command. +:inplace: Modifies the input .json file directly. If this option is not specified, it outputs the signed metadata to stdout. +:length: The length of the target, in bytes. +:version: The version string of the target. +:sha-256: The hash of the binary. 
For OSTree images, it is the root hash of the target commit. +:hardware-ids: The types of hardware with which this image is compatible. +:expires: The metadata expiry date. It is a UTC instant, such as `2020-01-01T00:01:00Z`. +:expire-after: The expiration delay in years, months, and days (each optional, but in that order), such as `1Y3M5D`. +:force: Skips sanity checking. For example, allows to set a date in the past. +:format: The format of the target: [`ostree`\|`binary`] +:url: (Optional) An external URL where the binary can be downloaded. + +You can use the `garage-sign` tool if you need to sign metadata about your software, as well as manage your software signing keys and root of trust. When you start using OTA Connect, we generate a root of trust and a signing key for you, and automatically sign software you upload. Later on, you can use `garage-sign` to rotate those keys, taking them completely offline to increase security. + +In the reference below, find a list of commands and options that you can use. + +.Garage-sign reference ++++
+++ +Global options ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| --help | Prints all available `garage-sign` commands and options. +| --version | Prints the current binary version. +| --verbose | Prints the verbose information for the execution. +| -h, --home-dir | The directory that you want to work with. By default, it is your current working directory. +|==================== + ++++
+++ + ++++
+++ +`user-keys [gen|id]`: Manages keys stored outside of a specific repository's directory. ++++
+++ + ++++
+++ +`user-keys gen`: Creates a key pair and stores it in a configurable location. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -t, --type | {type} +| --keysize | {keysize} +| -k, --key-name | {key-name-text} +|==================== + ++++
+++ + ++++
+++ +`user-keys id`: Calculates the Uptane key ID for a given public key. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -i, --input | The path to the file with your public key. +|==================== + ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -p, --keys-path | {keys-path} +|==================== + ++++
+++ + ++++
+++ +`delegations [init|sign|push|pull|add-target]` image:img::beta-icon.svg[Beta]: Manages delegation metadata. ++++
+++ + +`delegations init`: Creates an empty .json file with delegation metadata that you can edit and sign. + ++++
+++ +`delegations sign`: Signs delegation metadata. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -k, --key-name | The base name of the key to use for signing. +| -p, --keys-path | {keys-path} +| -i, --input | The path to the delegated Targets metadata file that you want to sign. +| -e, --inplace | {inplace} +|==================== + ++++
+++ + ++++
+++ +`delegations push`: Pushes delegation metadata to the server. Requires an initialized `tuf` repository. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of your local repository. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +| -n, --name | The name of the delegation. +| -i, --input | The path to the signed .json file with delegations. +|==================== + ++++
+++ + ++++
+++ +`delegations pull`: Pulls a delegated Targets metadata file from the server. Requires an initialized `tuf` repository. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of your local repository. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +| -n, --name | The name of the delegation. +| -o, --output | The name of the file to which you want to save the delegation. +|==================== + ++++
+++ + ++++
+++ +`delegations add-target`: Adds a new target to a delegated Targets metadata file. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| --length | {length} +| --name | The name of the target. +| --version | {version} +| --format | {format} +| --sha256 | {sha-256} +| --hardwareids | {hardware-ids} +| --url | {url} +| -i, --input | The path to the delegated Targets metadata file that you want to modify. +| -e, --inplace | {inplace} +|==================== + ++++
+++ + ++++
+++ + ++++
+++ +`init`: Creates an empty local repository. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of the local repository that you want to create. This repository should be a directory in your `tuf` repository. +| --reposerver | The repo server URL. By default, reads the URL from the .zip file with your provisioning credentials. +| -c, --credentials | The path to the .zip file with your provisioning credentials. +| -t, --servertype | The repo server type: `reposerver` (default) or `director`. +|==================== + ++++
+++ + ++++
+++ +`key [generate]`: Manages keys stored in a specific local repository's directory. ++++
+++ ++++
+++ +`key generate`: Generates a new key and saves it in a specific repository. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of the local repository where you want to save your new key. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +| -n, --name | {key-name-text} +| -t, --type | {type} +| --keysize | {keysize} +|==================== + ++++
+++ ++++
+++ + ++++
+++ +`move-offline`: Removes online keys from OTA Connect, and updates the environment to use locally stored offline keys. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of the local repository where you want to rotate keys. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +| --new-root | The new Root key that you want to add to the `root.json` file (should already exist). +| --new-targets | (Only for the repo server) The new Targets key that you want to add to the `root.json` file (should already exist). +| --old-root-alias | The alias of the old Root key. The old Root key will be saved under this name. +| --old-keyid | (Optional) The ID of the key that you want to remove from the `root.json` file. This app will try to use the last key defined in the current `root.json` file. +|==================== + ++++
+++ + ++++
+++ +`root [pull|push|get-unsigned|key|sign]`: Manages root-of-trust metadata for a repository. ++++
+++ + +`root pull`: Pulls the current `root.json` file from OTA Connect. + +`root push`: Uploads local `root.json` file to OTA Connect. If the file does not have a valid signature, it will be rejected by the server. + +`root get-unsigned`: Generates an unsigned `root.json` file in a canonical JSON form. + ++++
+++ +`root key [add|remove]`: Manages keys that are permitted to sign the root-of-trust metadata. ++++
+++ + ++++
+++ +`root key add`: Adds a specific key to the list of keys authorized to sign the root-of-trust metadata. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -k, --key-name | The path to the public key that you want to add. +|==================== + ++++
+++ + ++++
+++ +`root key remove`: Removes a specific key from the list of keys authorized to sign the root-of-trust metadata. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -k, --key-name | The name of the file with the keys that you want to remove. You can use the `--key-id` command instead. +| --key-id | The ID of the public key that you want to remove. You can use the `--key-name` command instead. +|==================== + ++++
+++ + ++++
+++ + ++++
+++ +`root sign`: Signs your root-of-trust metadata with a specific key and sets the expiry. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -k, --key-name | The path to the public key to use for signing. +| --expires | {expires} +| --expire-after | {expire-after} +| --force | {force} +|==================== + ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of the local repository where you want to manage the `root.json` file. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +|==================== + ++++
+++ + ++++
+++ +`targets [init|add|add-uploaded|delete|sign|pull|push|get-unsigned|upload|delegations]`: (Only for repositories of type `reposerver`) Manages Targets metadata. +// tag::target-term[] +*Target* is a term from Uptane. Each Target corresponds to a software version available in your OTA Connect software repository. +// end::target-term[] ++++
+++ + ++++
+++ +`targets init`: Creates a new top-level (non-delegated) `targets.json` file. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| --version | The version of the `targets.json` file. Versions are integers, normally starting at 1. They must always increase in each successive `targets.json` version. +| --expires | {expires} +|==================== ++++
+++ + ++++
+++ +`targets add`: Adds a target. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| --length | {length} +| --name | The name of the target. +| --version | {version} +| --format | {format} +| --sha256 | {sha-256} +| --hardwareids | {hardware-ids} +| --url | {url} +|==================== ++++
+++ + ++++
+++ +`targets delete`: Deletes a single target. This target can no longer be installed on devices. ++++
+ +[.release_notes] +[cols="15m,75a"] +|==================== +| --filename | The exact name of the target to remove. Should be in one of the following forms: `<name>_<version>` for OSTree images, or `<name>-<version>` for binary images. +|====================
+++ + ++++
+++ +`targets sign`: Signs your `targets.json` file with a specific key. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| --key-name | The path to the public key to use for signing. +|--version | The version number to use for the signed metadata. Overrides the version in the unsigned `targets.json`. +| --expires | {expires} +| --expire-after | {expire-after} +| --force | {force} +|==================== ++++
+++ + +`targets pull`: Pulls the current `targets.json` file from OTA Connect. + +`targets push`: Pushes the latest `targets.json` file to the server. +If the Targets file is invalid, for example because of a bad signature or a non-increasing version number, this `push` will fail with exit code 2. + +`targets get-unsigned`: Generates the unsigned `targets.json` file in a canonical JSON form. + ++++
+++ +`targets upload`: Uploads a binary to the repository. +// tag::targets-upload-note[] +Note that this *will not* make the binary available on its own. After the upload completes successfully, add it to your `targets.json` file using the `targets add-uploaded` command. +// end::targets-upload-note[] ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -i, --input | The path to the file that you want to upload. +| --name | The name of the target. +| --version | {version} +| --timeout | The timeout for the HTTP request of the upload, in seconds. +|==================== ++++
+++ + ++++
+++ +`targets add-uploaded`: Adds a target that you previously uploaded to OTA Connect using the `targets upload` command. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -i, --input | The path to the binary file. +| --name | The name of the target. +| --version | {version} +| --hardwareids | {hardware-ids} +|==================== ++++
+++ + +`targets delegations`: Manages the delegated Targets of the repository `targets.json` file. + ++++
+++ +`targets delegations add`: Adds a new delegation to the existing `targets.json` file. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -n, --name | The name of the target. +| -p, --prefix | The path prefix of the image that you want to delegate. +| -k, --key | The path to the public key that you want to add as a delegation key. +|==================== ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of your local repository. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +|==================== + ++++
+++ + ++++
+++ +`export-credentials`: Exports settings and keys to the .zip file with your provisioning credentials. ++++
+++ + +[.release_notes] +[cols="15m,75a"] +|==================== +| -r, --repo | The name of your local repository. This repository should be a directory in your `tuf` repository. You can create the repository with the `init` command. +| -k, --key-name | The name of the file with your private and public keys that you want to export. +| -o, --output | The name of the file to which you want to export our credentials. +|==================== ++++
+++ + +To learn how to use the garage-sign tool, see the following documentation: + +* xref:keep-local-repo-on-external-storage.adoc[Keep your repository on external storage] +* xref:rotating-signing-keys.adoc[Rotate keys for Root and Targets metadata] +* xref:finding-unsigned-metadata.adoc[Find the unsigned Root and Targets metadata] +* xref:change-signature-thresholds.adoc[Change signature thresholds] +* xref:metadata-expiry.adoc[Manage metadata expiry dates] +* xref:customise-targets-metadata.adoc[Add custom metadata fields to Targets metadata] +* xref:upload-large-binary.adoc[Upload a binary file] +* xref:remove-sw-version.adoc[Remove a software version] diff --git a/docs/ota-client-guide/modules/ROOT/pages/generate-devicecert.adoc b/docs/ota-client-guide/modules/ROOT/pages/generate-devicecert.adoc index c66fa6154a..8ca16b35f1 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/generate-devicecert.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/generate-devicecert.adoc @@ -8,65 +8,83 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -Once you have your final fleet root certificate, you can use it to generate and sign device certificates. You can then automate the process of installing device certificates on your devices. +Once you have your final Fleet Root CA, you can use it to sign device certificates. You can then automate the process of installing device certificates, or generate the keys and certificate on the device and sign the cert via PKCS#10 CSR. -We can't tell you exactly how to automate this process, but heres a recap of the steps involved. +We can't tell you exactly how to automate this process, but heres a recap of how to do it manually with a self-signed fleet root CA: *To generate a device certificate, follow these steps* -. Generate a UUID for the device, and make a directory for it: +. 
Make sure you already have xref:generate-selfsigned-root.adoc[generated a fleet root CA], and have a directory structure like the one in that guide. If you don't have exactly the same directory structure, adjust the following commands as needed. +. Generate an ID for the device, and make a directory for it. This ID should be unique within your fleet, so we recommend using a UUID if you do not already have a scheme of unique identifiers. + [source,bash] ---- -export SERVER_NAME=myservername -export DEVICES_DIR = DEVICES_DIR="./${SERVER_NAME}/devices" CWD="${PWD}" -export DEVICE_UUID=$(uuidgen | tr "[:upper:]" "[:lower:]") -export device_id=${DEVICE_ID:-${DEVICE_UUID}} device_dir="${DEVICES_DIR}/${DEVICE_UUID}" -mkdir -p "${device_dir}" +export device_id=$(uuidgen | tr "[:upper:]" "[:lower:]") +export device_dir=${fleet_name}/devices/${device_id} +mkdir -p ${device_dir} ---- -+ -[NOTE] -==== -You might want to update the line `export DEVICE_UUID=` and update it to reflect your own schema for generating device IDs. Currently this command generates a random ID. -==== -. Generate a device certificate and public key, and sign it with your fleet root certificate. -+ -As a reference, here is the command to generate and sign a device certificate with a self-signed root certificate. +. Generate a device certificate and public key, and sign it with your local Fleet Root CA. More complex architectures are possible, such as using a CSR server and xref:hsm-provisioning-example.adoc[generating the device certificates inside an HSM on the device], but are out of scope for this document. +.. You'll need OpenSSL config files called `client.cnf` and `client.ext` stored in the `devices` directory. You can paste the following to create the files with our recommended configuration: + [source,bash] ---- -include::example$start.sh[tags="genclientkeys"] ----- +cat < ${fleet_name}/devices/device_cert.cnf +[req] +prompt = no +distinguished_name = dn +req_extensions = ext -. 
Find out the address of the device gateway for your OTA Connect Account -+ -You can get this address from the `credentials.zip` that you download from the OTA Connect Portal. +[dn] +CN=\$ENV::device_id + +[ext] +keyUsage=critical, digitalSignature +extendedKeyUsage=critical, clientAuth +EOF + +cat < ${fleet_name}/devices/device_cert.ext +keyUsage=critical, digitalSignature +extendedKeyUsage=critical, clientAuth +EOF +---- +.. Generate and sign the new device certificate: + -You need this address to get the internal root CA certificate of the device gateway. This certificate is also necessary to provision devices. +[source,bash] +---- +# Generate a new elliptic curve based key +openssl ecparam -genkey -name prime256v1 | openssl ec -out "${device_dir}/pkey.ec.pem" -.. If you haven't done so already, xref:dev@ota-web::create-provisioning-key.adoc[download a provisioning key]. -.. Extract the contents of the `credentials.zip` file to a local folder. -.. In that folder, look for the file `autoprov` and open it with a text editor. +# Convert it to PKCS#8 format +openssl pkcs8 -topk8 -nocrypt -in "${device_dir}/pkey.ec.pem" -out "${device_dir}/pkey.pem" + +# Create a CSR for the new device +openssl req -new -config "${fleet_name}/devices/device_cert.cnf" -key "${device_dir}/pkey.pem" \ + -out "${device_dir}/${device_id}.csr" + +# Submit and resolve the CSR using your locally-generated Fleet Root CA +openssl x509 -req -days 365 -extfile "${fleet_name}/devices/device_cert.ext" \ + -in "${device_dir}/${device_id}.csr" -CAkey "${fleet_name}/fleet_root_ca.key" \ + -CA "${fleet_name}/fleet_root_ca.crt" -CAcreateserial -out "${device_dir}/client.pem" +---- + -You should see a URL that resembles the following example: +WARNING: These command string is designed for openssl 1.1 or higher. If you are using an older version, or if you are using LibreSSL, it may not work. 
Note that the default openssl provided by MacOS is LibreSSL; if you wish to try this process on a Mac you should install openssl from HomeBrew and add it to your path. +. Get the URL and certificate for your account's device gateway +.. You can get the URL from the `credentials.zip` that you download from the OTA Connect Portal. If you haven't done so already, xref:generating-provisioning-credentials.adoc[download a provisioning key]. +.. Extract the contents of the `credentials.zip` file to a local folder. +.. In that folder, look for the file `autoprov.url`. It will contain a URL that resembles the following example: + -`https://946f68b8-13d2-4647-b335-5a48777b5657.tcpgw.prod01.advancedtelematic.com:443` -.. Make a note of this URL. - -. Get the device gateway's root certificate with the following openssl command: +---- +https://beefcafe-13eb-478b-b215-fbd10dbbec0e.device-gateway.ota.api.here.com:443 +---- +.. Get the device gateway's root certificate and save it in the device directory with the following openssl command: + +[source,bash] ---- -export device_gateway= +export device_gateway= <1> openssl s_client -connect ${device_gateway}:8000 -servername $device_gateway -showcerts | \ sed -n '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > ${device_dir}/root.crt ---- -+ -Replace, the placeholder `` with URL that you noted in the previous step. -+ -. Make a note where the actual `$(device_dir)` is on your computer. -+ -You can quickly get it with the command `echo $(device_dir)`. Your device directory should resemble the following example: -+ -`myservername/devices/4e7cdc4f-b7dc-4fb0-900f-a237ba3e804c/` -. Once have noted your device directory, you can xref:enable-device-cred-provisioning.adoc[install the device certificate on the device]. -// end::install-root-ca[] \ No newline at end of file +<1> Replace `` with the URL from the previous step. 
+ +Once you have a signed device certificate and the device gateway's cert saved in your device directory, you can xref:enable-device-cred-provisioning.adoc[install the certificates on the device]. +// end::install-root-ca[] diff --git a/docs/ota-client-guide/modules/ROOT/pages/generate-selfsigned-root.adoc b/docs/ota-client-guide/modules/ROOT/pages/generate-selfsigned-root.adoc index b593bfd991..1d9284f0bd 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/generate-selfsigned-root.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/generate-selfsigned-root.adoc @@ -1,4 +1,4 @@ -= Generate a self-signed root certificate += Generate a self-signed fleet root certificate ifdef::env-github[] [NOTE] @@ -8,31 +8,49 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -When you move to production, you'll need to register your fleet root certificate with OTA Connect server. This certificate needs to be signed by a trusted Certificate Authority (CA). +When you move to production, you'll need to register your Fleet Root certificate with OTA Connect server. -If you don't yet have your own CA certificate for signing device certificates, you can generate a self-signed certificate for testing. +If you don't yet have your own CA for signing device certificates, you can generate a self-signed certificate for testing. -// tag::install-root-ca[] - -To generate a self-signed root certificate, follow these steps: :: -. Create a directory structure for the keys, and get some sample configurations for the certificates from the OTA Community Edition project: +To generate a self-signed fleet root certificate, follow these steps: :: +. Create a directory structure for your fleet. We will use this same basic directory structure in each of the sections of this guide. ++ +[source,bash] +---- +export fleet_name=myfleet +mkdir ${fleet_name} +mkdir ${fleet_name}/devices +---- ++ +. 
Create a file called `fleet_root_ca.cnf` inside your fleet directory to configure openSSL for generating your fleet root CA: + [source,bash] ---- -export SERVER_NAME=myservername -export SERVER_DIR="./${SERVER_NAME}" DEVICES_DIR="./${SERVER_NAME}/devices" CWD="${PWD}" -mkdir -p "$DEVICES_DIR" certs -for file in client.cnf device_ca.cnf server.ext client.ext server.cnf server_ca.cnf; do - curl -o certs/$file https://raw.githubusercontent.com/advancedtelematic/ota-community-edition/master/scripts/certs/$file -done +cat < ${fleet_name}/fleet_root_ca.cnf +[req] +prompt = no +distinguished_name = dn +x509_extensions = ext + +[dn] +CN = \$ENV::fleet_name + +[ext] +basicConstraints=CA:TRUE +keyUsage = keyCertSign +extendedKeyUsage = critical, serverAuth +EOF ---- + -Then, generate the key and cert using openssl on the command line: +TIP: All of the listed extensions in this config file are required. If your fleet root CA does not contain at least these three extensions, it will be rejected. +. Generate the key and cert using openssl on the command line. + [source,bash] ---- -include::example$start.sh[tags="genserverkeys"] +openssl ecparam -genkey -name prime256v1 | openssl ec -out "${fleet_name}/fleet_root_ca.key" +openssl req -new -x509 -days 3650 -key "${fleet_name}/fleet_root_ca.key" -config \ + "${fleet_name}/fleet_root_ca.cnf" -out "${fleet_name}/fleet_root_ca.crt" ---- + -This will create a `./${SERVER_DIR}/devices/` directory with the `ca.crt` certificate and a `ca.key` private key. Keep the private key safe and secure. -. Next, xref:provide-testroot-cert.adoc[register the test root certificate with your OTA Connect account]. \ No newline at end of file +WARNING: These commands are designed for openssl 1.1 or higher. If you are using an older version, or if you are using LibreSSL, it may not work. Note that the default openssl provided by MacOS is LibreSSL; if you wish to try this process on a Mac you should install openssl from HomeBrew and add it to your path. +. 
Send an email to link:mailto:otaconnect.support@here.com[otaconnect.support@here.com] with your `fleet_root_ca.crt` file (but not `fleet_root_ca.key`!), and ask us register it as a Fleet Root certificate on your OTA Connect account. Depending on your account usage we may require extra verification steps to validate the new CA. diff --git a/docs/ota-client-guide/modules/ROOT/pages/generatetest-devicecert.adoc b/docs/ota-client-guide/modules/ROOT/pages/generatetest-devicecert.adoc deleted file mode 100644 index 014dbca07b..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/generatetest-devicecert.adoc +++ /dev/null @@ -1,70 +0,0 @@ -= Generate a test device certificate -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -// MC: This is a slightly altered copy of "generate-devicecert.doc" with wording that explains the process from the perspective of testing with a self-signed root cert. - -Once you have created a self-signed root certificate, you can use it to generate and sign device certificates. You then install these certificates on your test devices. - -*Generate a test device certificate, follow these steps* - -. Generate a UUID for the device, and make a directory for it: -+ -[source,bash] ----- -export SERVER_NAME=myservername -export DEVICES_DIR = DEVICES_DIR="./${SERVER_NAME}/devices" CWD="${PWD}" -export DEVICE_UUID=$(uuidgen | tr "[:upper:]" "[:lower:]") -export device_id=${DEVICE_ID:-${DEVICE_UUID}} device_dir="${DEVICES_DIR}/${DEVICE_UUID}" -mkdir -p "${device_dir}" ----- -+ -[NOTE] -==== -Replace `myservername` with the server name that you used to xref:generate-selfsigned-root.adoc[generate your root certificate] -- unless you actually used the placeholder suggestion `myservername` from that procedure. -==== -. 
Generate a device certificate and public key, and sign it with the root CA that you created previously. -+ -[source,bash] ----- -include::example$start.sh[tags="genclientkeys"] ----- - -. Find out the address of the device gateway for your OTA Connect Account -+ -You can get this address from the `credentials.zip` that you download from the OTA Connect Portal. -+ -You need this address to get the internal root CA certificate of the device gateway. This certificate is also necessary to provision devices. - -.. If you haven't done so already, xref:dev@ota-web::create-provisioning-key.adoc[download a provisioning key]. -.. Extract the contents of the `credentials.zip` file to a local folder. -.. In that folder, look for the file `autoprov` and open it with a text editor. -+ -You should see a URL that resembles the following example: -+ -`https://946f68b8-13d2-4647-b335-5a48777b5657.tcpgw.prod01.advancedtelematic.com:443` -.. Make a note of this URL. - -. Get the device gateway's root certificate with the following openssl command: -+ ----- -export device_gateway= -openssl s_client -connect ${device_gateway}:8000 -servername $device_gateway -showcerts | \ - sed -n '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > ${device_dir}/root.crt ----- -+ -Replace, the placeholder `` with URL that you noted in the previous step. -+ -. Make a note where the actual `$(device_dir)` is on your computer. -+ -You can quickly get it with the command `echo $(device_dir)`. Your device directory should resemble the following example: -+ -`myservername/devices/4e7cdc4f-b7dc-4fb0-900f-a237ba3e804c/` -. Once have noted your device directory, you can xref:enable-device-cred-provtest.adoc[install the test device certificate on the device]. 
-// end::install-root-ca[] diff --git a/docs/ota-client-guide/modules/ROOT/pages/generating-provisioning-credentials.adoc b/docs/ota-client-guide/modules/ROOT/pages/generating-provisioning-credentials.adoc index b564b1051b..3afd8d5900 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/generating-provisioning-credentials.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/generating-provisioning-credentials.adoc @@ -1,11 +1,10 @@ -ifndef::env-github[] +include::ota-web::page$create-provisioning-key.adoc[] + + +ifdef::env-github[] [NOTE] ==== We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. ==== endif::[] -include::dev@ota-web:ROOT:page$create-provisioning-key.adoc[] - -// MC: Images don't render from included files, added local copies of 's1-prov.png' and 'screenshot_provisioning_key_2.png' (from "ota-web") until I can find a better solution. -// Included from https://github.com/advancedtelematic/ota-plus-server/tree/docs/OTA-2683/migrate-docs-antora/ota-plus-web/docs/modules/ROOT/pages/create-provisioning-key.adoc \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/hsm-provisioning-example.adoc b/docs/ota-client-guide/modules/ROOT/pages/hsm-provisioning-example.adoc new file mode 100644 index 0000000000..d184ebbc87 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/hsm-provisioning-example.adoc @@ -0,0 +1,210 @@ += Generate a device certificate using an HSM +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +This section will demonstrate an example of handling device certificates with HSM and was tested using a NitroKey HSM and OpenSC. Instructions will vary depending on your target hardware. 
+ +The steps are very similar to those described in xref:generate-devicecert.adoc[Device Certificate Generation] with the main difference being that the device keys are generated on the HSM. Please make sure that you understand and have tested these steps before continuing here. + +== Preparing the HSM + +You can directly follow link:https://github.com/OpenSC/OpenSC/wiki/SmartCardHSM#initialize-the-device[the steps provided by NitroKey] for this section. + +At the end, you should have an empty HSM. The SO-PIN should be kept in a safe place and won't be needed for the rest of this guide. We will however assume that you kept the PIN in a shell variable in the next instructions: + +[source,sh] +---- +export PIN=YOUR_PIN +---- + +== Generating the certificate and Uptane keys + +To continue, you will have to set up a root certificate, as described in xref:generate-selfsigned-root.adoc[Generate a self-signed root certificate]. If you chose to use an external CA, you will need to send them the certificate signing request (csr file) and obtain the certificate, instead of generating it yourself. + +. Install necessary dependencies ++ +[source,sh] +---- +sudo apt install opensc-pkcs11 openssl +---- ++ +. Generate an ID for the device, and make a directory for it. This ID should be unique within your fleet, so we recommend using a UUID if you do not already have a scheme of unique identifiers. ++ +[source,bash] +---- +export device_id=$(uuidgen | tr "[:upper:]" "[:lower:]") +export device_dir=${fleet_name}/devices/${device_id} +mkdir -p ${device_dir} +---- +. Generate a key on the device with id 02 ++ +[source,sh] +---- +pkcs11-tool -l --pin $PIN --keypairgen --key-type EC:prime256v1 --id 02 --label devicekey +---- ++ +. Create a configuration file for OpenSSL, named hsm.conf: ++ +---- +# PKCS11 engine config +openssl_conf = openssl_def + +[openssl_def] +engines = engine_section + +[req] +distinguished_name = req_distinguished_name + +[req_distinguished_name] +# empty.
+ +[engine_section] +pkcs11 = pkcs11_section + +[pkcs11_section] +engine_id = pkcs11 +dynamic_path = /usr/lib/x86_64-linux-gnu/engines-1.1/pkcs11.so +MODULE_PATH = /usr/lib/x86_64-linux-gnu/pkcs11/opensc-pkcs11.so +PIN = $PIN +init = 0 +---- ++ +`dynamic_path`, `MODULE_PATH` and `PIN` will need to be changed depending on your system installation and HSM pin. ++ +. Generate a Certificate Signing Request using the key: ++ +[source,sh] +---- +OPENSSL_CONF=./hsm.conf openssl req -new -config "$CWD/certs/client.cnf" -engine pkcs11 -keyform engine -key 4:02 -out "$device_dir/$device_id.csr" +---- ++ +The `4:02` indicates the `slot:id` to use for the key, you will probably have to change it depending on your setup. In particular, the slot can be found by running `pkcs11-tool -L`: ++ +---- +Available slots: +Slot 1 (0x4): Nitrokey Nitrokey HSM (010000000000000000000000) 01 00 + token label : UserPIN (test) + token manufacturer : www.CardContact.de + token model : PKCS#15 emulated + token flags : login required, rng, token initialized, PIN initialized + hardware version : 24.13 + firmware version : 2.6 + serial num : DENK0200509 + pin min/max : 6/15 +---- ++ +The slot number is the one in hexadecimal between parentheses. ++ +. Generate the device certificate and store it in the HSM: ++ +[source,sh] +---- +openssl x509 -req -days 365 -extfile "${CWD}/certs/client.ext" -in "${device_dir}/${device_id}.csr" \ + -CAkey "${DEVICES_DIR}/ca.key" -CA "${DEVICES_DIR}/ca.crt" -CAcreateserial -out "${device_dir}/client.pem" +---- ++ +. Save the certificate on the HSM: ++ +[source,sh] +---- +openssl x509 -in "$device_dir/client.pem" -out "$device_dir/client.der" -outform der +pkcs11-tool -l --pin $PIN --write-object "$device_dir/client.der" --type cert --id 01 --label devicecert +---- ++ +. 
Generate an RSA key to sign Uptane metadata ++ +[source,sh] +---- +pkcs11-tool -l --pin $PIN --keypairgen --key-type RSA:2048 --id 03 --label uptanekey +---- ++ +At this point, you can verify that your device contains the three objects by running `pkcs11-tool -O`: ++ +---- +Using slot 1 with a present token (0x4) +Public Key Object; RSA 2048 bits + label: uptanekey + ID: 03 + Usage: encrypt, verify, wrap +Public Key Object; EC EC_POINT 256 bits + EC_POINT: 044104d59c51e5454d46787bdb9db3ea450bc118f71bf5fd352cf0ae4e41720d897eb4051345d0ef5470fd4e3b1c3c18066199915c88eeab7a3ad3e595d4ecaa38f564 + EC_PARAMS: 06082a8648ce3d030107 + label: devicepriv + ID: 02 + Usage: verify +Certificate Object; type = X.509 cert + label: Certificate + subject: DN: CN=089f19e2-2f52-4a30-98f1-66e35cc11611 + ID: 01 +Public Key Object; EC EC_POINT 256 bits + EC_POINT: 044104d59c51e5454d46787bdb9db3ea450bc118f71bf5fd352cf0ae4e41720d897eb4051345d0ef5470fd4e3b1c3c18066199915c88eeab7a3ad3e595d4ecaa38f564 + EC_PARAMS: 06082a8648ce3d030107 + label: Certificate + ID: 01 + Usage: encrypt, verify +---- + +== Setting up aktualizr + +The following conditions should be fulfilled: + +* the HSM token should be accessible on the device +* aktualizr must be compiled with P11 support (refer to xref:enable-device-cred-provisioning.adoc[Enable device-credential provisioning and install device certificates]) +* supporting libraries, such as OpenSC must be present on the device +* the device gateway url and TLS root certificate must be obtained from a set of credentials: ++ +[source,sh] +---- +unzip credentials.zip autoprov.url autoprov_credentials.p12 +mv autoprov.url gateway.url +openssl pkcs12 -in autoprov_credentials.p12 -nokeys -cacerts -out ca.crt +---- ++ +* aktualizr must be configured to use the gateway url, root certificate and HSM. 
For example: ++ +---- +[tls] +server_url_path = "/var/sota/import/gateway.url" +cert_source = "pkcs11" +pkey_source = "pkcs11" + +[p11] +module = "/usr/lib/opensc-pkcs11.so" +pass = "1234" +uptane_key_id = "03" +tls_clientcert_id = "01" +tls_pkey_id = "02" + +[uptane] +key_source = "pkcs11" + +[import] +base_path = "/var/sota/import" +tls_cacert_path = "root.crt" +---- ++ +Note: on Ubuntu Bionic, the OpenSC pkcs11 module lies in `/usr/lib/x86_64-linux-gnu/pkcs11/opensc-pkcs11.so`. + +== Provisioning the device + +If all these steps have been followed, the device will establish a TLS connection to the backend using the HSM and will sign its manifests with the Uptane private key. + +== Yocto integration + +You can here refer to the instructions in xref:enable-device-cred-provisioning.adoc[Enable device-credential provisioning and install device certificates] but use OpenSC instead of SoftHSM: + +---- +IMAGE_INSTALL_append = " opensc" +SOTA_CLIENT_FEATURES = "hsm" +SOTA_CLIENT_PROV = "aktualizr-device-prov-hsm" +SOTA_DEPLOY_CREDENTIALS = "0" +---- + +Also, the configuration fragment `/usr/lib/sota/conf.d/20-sota-device-cred-hsm.toml` will also have to be modified, as detailed in the previous section, for example with a `.bbappend`. + +Note that for the moment, the gateway url and root certificate will still need to be copied manually to the device. diff --git a/docs/ota-client-guide/modules/ROOT/pages/install-garage-sign-deploy.adoc b/docs/ota-client-guide/modules/ROOT/pages/install-garage-sign-deploy.adoc index 3cbe0c7705..37fc0fcea4 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/install-garage-sign-deploy.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/install-garage-sign-deploy.adoc @@ -12,11 +12,11 @@ endif::[] :page-date: 2018-09-13 11:50:24 :page-order: 2 :icons: font -:garage-deploy-version: 2019.8 +include::partial$aktualizr-version.adoc[] For our recommended production workflow, we recommend some extra security procedures. 
Before you can follow these procedures, you need to install our `garage-deploy` tool first. -We currently provide released versions of `garage-deploy` for Ubuntu 18.04 (Bionic) and Ubuntu 16.04 (Xenial) which are available on https://github.com/advancedtelematic/aktualizr/releases/tag/{garage-deploy-version}. +We currently provide released versions of `garage-deploy` for Ubuntu 18.04 (Bionic) and Ubuntu 16.04 (Xenial) which are available on https://github.com/advancedtelematic/aktualizr/releases/tag/{aktualizr-version}. == Installation instructions @@ -26,7 +26,7 @@ To install `garage-deploy` on an Ubuntu 18.04 machine, download the `garage-depl [subs="attributes"] ---- -wget https://github.com/advancedtelematic/aktualizr/releases/download/{garage-deploy-version}/garage_deploy-ubuntu_18.04.deb +wget https://github.com/advancedtelematic/aktualizr/releases/download/{aktualizr-version}/garage_deploy-ubuntu_18.04.deb sudo apt install ./garage_deploy-ubuntu_18.04.deb ---- @@ -34,22 +34,21 @@ For Ubuntu 16.04: [subs="attributes"] ---- -wget https://github.com/advancedtelematic/aktualizr/releases/download/{garage-deploy-version}/garage_deploy-ubuntu_16.04.deb +wget https://github.com/advancedtelematic/aktualizr/releases/download/{aktualizr-version}/garage_deploy-ubuntu_16.04.deb sudo apt install ./garage_deploy-ubuntu_16.04.deb ---- === Other debian-based distros or versions -If you're using another version of Ubuntu, or another Debian-based distribution that we don't provide packages for, you can build a .deb yourself. Check out https://github.com/advancedtelematic/aktualizr/tree/{garage-deploy-version}[aktualizr], install the required dependencies link:https://github.com/advancedtelematic/aktualizr/tree/{garage-deploy-version}#dependencies[listed here] (exact package names may vary) and build the deb package yourself: +If you're using another version of Ubuntu, or another Debian-based distribution that we don't provide packages for, you can build a .deb yourself. 
Check out https://github.com/advancedtelematic/aktualizr/tree/{aktualizr-version}[aktualizr], install the required dependencies link:https://github.com/advancedtelematic/aktualizr/tree/{aktualizr-version}#dependencies[listed here] (exact package names may vary) and build the deb package yourself: [subs="attributes"] ---- -git clone --branch {garage-deploy-version} --recursive https://github.com/advancedtelematic/aktualizr -sudo apt install asn1c build-essential clang clang-check-3.8 clang-format-3.8 clang-tidy-3.8 cmake curl \ +git clone --branch {aktualizr-version} --recursive https://github.com/advancedtelematic/aktualizr +sudo apt install asn1c build-essential clang clang-format-11 clang-tidy-11 cmake curl \ doxygen graphviz lcov libarchive-dev libboost-dev libboost-filesystem-dev libboost-log-dev \ - libboost-program-options-dev libboost-serialization-dev libboost-iostreams-dev libcurl4-openssl-dev \ - libdpkg-dev libostree-dev libp11-2 libp11-dev libpthread-stubs0-dev libsodium-dev libsqlite3-dev \ - libssl-dev libsystemd-dev + libboost-program-options-dev libcurl4-openssl-dev libostree-dev libp11-2 libp11-dev \ + libpthread-stubs0-dev libsodium-dev libsqlite3-dev libssl-dev libsystemd-dev cd aktualizr mkdir build cd build @@ -62,11 +61,11 @@ sudo apt install ./garage_deploy.deb If you're using a non-debian-based distro, you will need to build and install the binary directly. -First, install the required dependencies link:https://github.com/advancedtelematic/aktualizr/tree/{garage-deploy-version}#dependencies[listed here]. (These are the Ubuntu package names; the packages may be named differently in your distro's repositories.) Then, you can build as above, but with `garage-deploy` as the make target: +First, install the required dependencies link:https://github.com/advancedtelematic/aktualizr/tree/{aktualizr-version}#dependencies[listed here]. (These are the Ubuntu package names; the packages may be named differently in your distro's repositories.) 
Then, you can build as above, but with `garage-deploy` as the make target: [subs="attributes"] ---- -git clone --branch {garage-deploy-version} --recursive https://github.com/advancedtelematic/aktualizr +git clone --branch {aktualizr-version} --recursive https://github.com/advancedtelematic/aktualizr cd aktualizr mkdir build cd build @@ -79,11 +78,11 @@ The executable will be available in `build/src/sota_tools/garage-deploy`. == Usage -Once you've installed `garage-deploy` tool, you're ready to perform the following tasks: +Once you've installed the `garage-deploy` tool, you're ready to perform the following tasks: -* Move device images from one account to another--for example, to send a development build to the QA team, or to send a release candidate to the deployment team. +* Move device images from one environment to another. For example, you may need to send a development build to the QA team or a release candidate to the deployment team. + -For more information, see "xref:crossdeploying-device-images-to-a-different-account.adoc[Cross-deploy device images to a different account]". +For more information, see "xref:cross-deploy-images.adoc[Cross-deploy device images to a different environment]". * Create offline signing keys that you manage yourself and rotate out the installed online keys. + diff --git a/docs/ota-client-guide/modules/ROOT/pages/intro-prep.adoc b/docs/ota-client-guide/modules/ROOT/pages/intro-prep.adoc index 20420e13fa..d8ec54a7d7 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/intro-prep.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/intro-prep.adoc @@ -1,4 +1,4 @@ -= Build your own OTA-enabled solution += Integrate OTA Connect ifdef::env-github[] [NOTE] @@ -8,19 +8,19 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -Once you've evaluated the basic functions of OTA Connect, you're ready to build your own OTA-enabled solution. 
You probably have your client software that runs on your devices. In this phase, you'll start to integrate OTA update functionality into that client. +Once you've evaluated the basic functions of OTA Connect, you're ready to integrate OTA Connect into your real-world workflows. This might include building your own OTA client based on libaktualizr, separating out different software repositories for different environments, and more. -You also need to make sure that your device provisioning process reflects what you want to use in production. You also want to set up the software repository for your development account. +You will also start thinking about production-ready security, making sure that your device provisioning and software signing processes reflect what you want to use in production. You also want to set up the software repository for your development account. -Here's our recommended steps for building your own OTA-enabled solution: +Here are our recommended steps for integration: -* xref:account-setup.adoc[*Set up different user accounts*] +* xref:add-environments.adoc[*Set up different environments*] + -Avoid mixing up test software and production software by uploading software under different user accounts. +To avoid mixing up test software and production software upload the software to different environments. * xref:libaktualizr-why-use.adoc[*Integrate libaktualizr with the client software on your board*] + -When you move to production, you'll want to integrate OTA functionality into your board's native software. +When you move to production, you'll probably want to integrate OTA functionality into your board's native software rather than using the default stand-alone client. 
* xref:build-ota-enabled-images.adoc[*Build and deploy your integration*] + @@ -28,7 +28,7 @@ Once have a working version of your integration, you'll want to build a disk ima * xref:cross-deploy-images.adoc[*Transfer disk images to a QA repository*] + -After you've build your images, you'll want to hand them over to your QA team, who are ideally testing the software under a QA account with its own software repository. +After you've built your images, you'll want to hand them over to your QA team, who are ideally testing the software in a QA environment with its own software repository. * xref:device-cred-prov-steps.adoc[*Set up provisioning with device credentials*] + @@ -38,4 +38,3 @@ Install device certificates yourself rather than having the OTA Connect server i + To secure your software updates, all files are accompanied by metadata that is signed with several private keys. You need to move the most important private keys from the server and take them offline. - \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/intro-prod.adoc b/docs/ota-client-guide/modules/ROOT/pages/intro-prod.adoc deleted file mode 100644 index 8ef852ca8c..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/intro-prod.adoc +++ /dev/null @@ -1,44 +0,0 @@ -= Moving to Production -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - -{product-name} is designed to integrate easily into development workflows: you build your image, push it, set auto-updates for some of your test-bench devices, and so on. But once you're ready to move from testing into production, you will likely want to do a few things differently. Here is a summary of our recommended workflow for moving from development to production. 
- -== Maintain separate accounts for development, testing and production - -There are (at least) two good reasons to separate your dev/testing accounts from production: - -* Isolation of production-ready deployments - -{product-name-short} does not support deleting devices or device images. We do this for auditability, but it does mean that your dev/testing account can end up getting a bit cluttered. You wouldn't want to accidentally select the wrong build on a production run. - -* Separation of responsibilities - -Chances are, your developer team isn't responsible for making the decision to push updates live. You can cross-deploy images that are ready for testing to a test account controlled by QA, then once again to an account for production once they've passed QA. - -''' - -To make this workflow possible, create an account for each environment you find useful (e.g. dev, QA, beta, production, etc.), and then read about xref:crossdeploying-device-images-to-a-different-account.adoc[using garage-deploy to cross-deploy images from one account to another]. - -== Manage your own chain(s) of trust - -When you create an account, and when you create provisioning credentials, we generate various keys for you. There's a xref:provisioning-methods-and-credentialszip.adoc[table here] listing everything that's in `credentials.zip`, but the main thing you need to know is that there are two root authorities that we initially generate: a root CA for authenticating your devices during provisioning, and a private key for signing the metadata of the images you build. We take the security of these keys/certificates extremely seriously: following industry best practices, they are kept in a link:https://www.vaultproject.io/[Vault] instance and only taken out when you request them. - -However, *we don't need to have the keys at all*. You can manage your own root CAs in both cases. 
Images you build get signed locally before being pushed, and {product-name-short} only needs to verify the signatures, and thus xref:enable-device-cred-provisioning.adoc#_use_a_hardware_security_module_hsm_when_provisioning_with_device_credentials[provision your devices with device credentials]. - -Once you've done that, we won't have any of your private key material at all. - -== Consider using a Hardware Security Module - -In the quickstart guides, we automatically provision your devices when they come online for the first time. To make this happen, the devices need to present a provisioning key. If the provisioning key is valid, then {product-name-short} bootstraps the provisioning process by negotiating unique, device-specific credentials in the form of X.509 certificates for mutual TLS authentication. It's these generated credentials that are used when the device connects to the server for updates. - -We refer to this method as provisioning with "shared" credentials. Although each device eventually receives device-specific credentials, the process begins with a shared credential: the provisioning key. - -{product-name} also supports provisioning with pre-loaded device credentials. In this case, the device is pre-loaded with the requisite bootstrap credentials, signed by a root CA of your choice. These can be stored in the HSM, obviating the need for a provisioning key on the device that could potentially be compromised, and also obviating the need for us to hold key material for provisioning. - -How the HSM for your individual board or device works is up to you, but you can xref:enable-device-cred-provisioning.adoc#_simulate_the_provisioning_process_with_device_credentials[simulate a hardware security module in QEMU] to get an idea of how the process works. 
We provide instructions for QEMU HSM provisioning only; if you need development support in adapting the instructions to your own board, link:mailto:otaconnect.support@here.com[contact us for a consultation]. diff --git a/docs/ota-client-guide/modules/ROOT/pages/keep-local-repo-on-external-storage.adoc b/docs/ota-client-guide/modules/ROOT/pages/keep-local-repo-on-external-storage.adoc new file mode 100644 index 0000000000..56a10a3bad --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/keep-local-repo-on-external-storage.adoc @@ -0,0 +1,31 @@ += Keep your repository on external storage +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +Your software repository contains information on your software images and packages, as well as your Root and Targets keys. +For safety reasons, we recommend keeping your keys offline on an external storage device, and for convenience, we recommend to simply keep the entire repository structure (with the keys included) on external storage. This external device should be kept offline, in a securely locked location, and only plugged into a computer when you need to make changes to the repository. + +*To get a copy of a software repository on external storage:* + +. Connect the external storage to your computer. +. Make sure you have the link:https://tuf-cli-releases.ota.here.com/index.html[latest version,window="_blank"] of the `garage-sign` tool. +. Get the .zip file with your provisioning credentials and save it to your external storage. ++ +For instructions, see the xref:ota-client::generating-provisioning-credentials.adoc[related] section in this guide. +. In the folder of your external storage, initialize a local repository. 
++ +[source, bash] +---- +garage-sign init \ + --repo \ + --credentials +---- + +Work with your local repository only on the external storage. + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-getstarted.adoc b/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-getstarted.adoc index 4caaab6137..3ba194413e 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-getstarted.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-getstarted.adoc @@ -48,7 +48,7 @@ make NOTE: The `--recursive` flag in the `git clone` command is needed to recursively clone all the *git submodules*. -After the build is finished, you'll find the client library at the following location: `${PROJECT_ROOT}/build/src/libaktualizr/libaktualizr_static_lib.a`. +After the build is finished, you'll find the client library at the following location: `$\{PROJECT_ROOT}/build/src/libaktualizr/libaktualizr.so`. == Integrating libaktualizr into your application @@ -75,12 +75,18 @@ $ git clone --recursive https://github.com/advancedtelematic/aktualizr.git add_subdirectory(aktualizr) ---- + -This command automatically adds the needed entries to `INCLUDE_DIRECTORIES` variable, create the `AKTUALIZR_EXTERNAL_LIBS` variable, which contains the list of external libraries required for linking with libaktualizr, and create the `aktualizr_static_lib` target. +This command automatically adds the needed entries to `INCLUDE_DIRECTORIES` variable, create the `AKTUALIZR_EXTERNAL_LIBS` variable, which contains the list of external libraries required for linking with libaktualizr, and create the `aktualizr_static_lib` and `aktualizr_lib` targets. . 
Do a test build of your application + Whenever you build your application you must add these libraries as follows: + [source,cmake] +target_link_libraries(your-app aktualizr_lib) ++ +Or: ++ +[source,cmake] +# DEPRECATED! target_link_libraries(your-app aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) + You also might need to add the following line if you are using boost libraries: @@ -93,7 +99,7 @@ If you don't want to make libaktualizr part of your cmake project, it's also pos Here's an example of how to specify the required libraries for an out-of-tree build in cmake project: [source,cmake] ---- -target_link_libraries(your-app ${AKTUALIZR_PROJECT_DIR}/build/src/libaktualizr/libaktualizr_static_lib.a) +target_link_libraries(your-app aktualizr_lib) target_link_libraries(your-app pthread) target_link_libraries(your-app archive) target_link_libraries(your-app boost_atomic) @@ -125,14 +131,14 @@ Libaktualizr provides a C++ API for fetching information about available updates If you're yet not familiar with OTA Connect concepts such as "campaigns", have a look at the https://connect.ota.here.com/#/campaigns[OTA web app] first. You might need to register for a free developer account first. -The main library header is https://github.com/advancedtelematic/aktualizr/blob/master/src/libaktualizr/primary/aktualizr.h[`primary/aktualizr.h`]. It also includes few other libaktualizr headers. +The main library header is https://github.com/advancedtelematic/aktualizr/blob/master/include/libaktualizr/aktualizr.h[`libaktualizr/aktualizr.h`]. It also includes a few other libaktualizr headers. -To use the API, add the `src/libaktualizr` directory to your include path and add `# include "primary/aktualizr.h"` to your source file. +To use the API, add the `aktualizr/include` directory to your include path and add `#include <libaktualizr/aktualizr.h>` to your source file.
When using the API, consider the following points: * Most of the API calls, unless specified otherwise, are asynchronous and return a `std::future` which contains the corresponding result type. -* Result types are defined in the https://github.com/advancedtelematic/aktualizr/blob/master/src/libaktualizr/primary/results.h[`primary/results.h`] header. +* Result types are defined in the https://github.com/advancedtelematic/aktualizr/blob/master/include/libaktualizr/results.h[`libaktualizr/results.h`] header. * Asynchronous commands are posted to the command queue and executed in sequential order in a separate thread. * If the execution is paused, newly issued commands accumulate in the command queue and it's up to the caller to ensure that the queue doesn't get overloaded with unnecessary duplicate commands. @@ -157,13 +163,13 @@ An instance is constructed based on the provided config. A config should at leas The configuration options depend on the used provisioning type and the local storage which you use to store updates and metadata. For description of all configuration options, refer to the client configuraton xref:aktualizr-config-options.adoc[reference documentation] and to the https://github.com/advancedtelematic/aktualizr/tree/master/config[`config`] folder for configuration examples. -* *Add a new secondary ECU* +* *Add a new Secondary ECU* + [source,cpp] ---- -void Aktualizr::AddSecondary(const std::shared_ptr &secondary) +void Aktualizr::AddSecondary(const std::shared_ptr &secondary) ---- -You must call this function before you call `Initialize`. To find out more about primary and secondary ECUs, see our xref:uptane.adoc#_primary_and_secondary_ecus[Uptane description]. +You must call this function before you call `Initialize`. To find out more about Primary and Secondary ECUs, see our xref:uptane.adoc#_primary_and_secondary_ecus[Uptane description]. * *Initialize aktualizr* + @@ -171,7 +177,7 @@ You must call this function before you call `Initialize`. 
To find out more about ---- void Aktualizr::Initialize() ---- -Any secondary ECUs should be added before making this +Any Secondary ECUs should be added before making this call. This will provision with the server if required. This must be called before using any other aktualizr functions except `AddSecondary`. * *Set a callback to receive event notifications* @@ -180,7 +186,7 @@ call. This will provision with the server if required. This must be called befor ---- boost::signals2::connection Aktualizr::SetSignalHandler(std::function)> &handler) ---- -Returns a signal connection object, which can be disconnected if desired. The events are defined in the https://github.com/advancedtelematic/aktualizr/blob/master/src/libaktualizr/primary/events.h[`primary/events.h`] header. +Returns a signal connection object, which can be disconnected if desired. The events are defined in the https://github.com/advancedtelematic/aktualizr/blob/master/include/libaktualizr/events.h[`libaktualizr/events.h`] header. * *Pause a command* + @@ -235,11 +241,7 @@ A campaign contains an update which must be accepted by the end user (or on beha ==== Update management commands -[cols="d,a"] - -| TASK | CALL - -* *Sends local device data to the server* +* *Send local device data to the server* + [source,cpp] ---- @@ -253,7 +255,7 @@ This data includes network status, installed packages and hardware information. ---- std::future Aktualizr::CheckUpdates() ---- -Fetches Uptane metadata and check for updates. This collects a client manifest, PUTs it to the director, updates the Uptane metadata (including root and targets), and then checks the metadata for updates to the target software. +Fetches Uptane metadata and checks for updates. This collects a client manifest, PUTs it to the Director, updates the Uptane metadata (including Root and Targets), and then checks the metadata for updates to the target software.
* *Download target files* + diff --git a/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-why-use.adoc b/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-why-use.adoc index 3b1aad64c7..a3215f59c0 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-why-use.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/libaktualizr-why-use.adoc @@ -14,20 +14,20 @@ This topic is supposed to outline the main use cases the product aims to address For libaktualizr We already have this topic: https://docs.atsgarage.com/client-config/advanced-update-control-with-libaktualizr.html The following text was taken from the linked topic and is a proposal for the introcdution to the integration guide. -Feel free to adapt it or leave as-is. +Feel free to adapt it or leave as-is. //// The OTA Connect client (aktualizr) is designed to be run as a standalone component on an embedded system and can manage the entire software update process. However, most automotive production use cases will have requirements that go beyond what the standalone client can provide. For example, some in-vehicle interfaces are proprietary and under NDA, so their implementation must be kept separate from aktualizr. -= Why use libaktualizr? +== Why use libaktualizr? You can integrate the OTA update functionality yourself and minimize the involvement of external consultants. For this purpose, you can use libaktualizr to build your own OTA update solution. Typical scenarios for making your own client could be: * You want to integrate OTA Connect functionality with a third-party HMI -* You want to integrate OTA Connect with a third-party interface that installs software on secondary ECUs +* You want to integrate OTA Connect with a third-party interface that installs software on Secondary ECUs * You want to constrain network traffic and software updates to specific vehicle states * You want to provide motorists or service staff with progress indicators for specific software updates. 
-To get started, have a look at our https://github.com/advancedtelematic/libaktualizr-demo[demo app] in GitHub or read through our guide to xref:libaktualizr-getstarted.adoc[getting started with libaktualizr]. \ No newline at end of file +To get started, have a look at our https://github.com/advancedtelematic/libaktualizr-demo[demo app] in GitHub or read through our guide to xref:libaktualizr-getstarted.adoc[getting started with libaktualizr]. diff --git a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-build.adoc b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-build.adoc deleted file mode 100644 index 57f99c2c98..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-build.adoc +++ /dev/null @@ -1,70 +0,0 @@ -= Build -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - -:meta-updater-github-url: https://github.com/advancedtelematic/meta-updater/tree/master - -== Quickstart - -If you don't already have a Yocto project that you want to add OTA to, you can use the xref:dev@getstarted::raspberry-pi.adoc[HERE OTA Connect Quickstart] project to rapidly get up and running on a Raspberry Pi. It takes a standard https://www.yoctoproject.org/tools-resources/projects/poky[poky] distribution, and adds OTA and OSTree capabilities. - -== Dependencies - -//MC: TOMERGE: These "dependencies" mostly just duplicates the prerequisite sections: https://main.gitlab.in.here.com/olp/edge/ota/documentation/ota-connect-docs/blob/master/docs/getstarted/modules/ROOT/pages/raspberry-pi.adoc - -In addition to the link:https://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#required-packages-for-the-build-host[standard Yocto dependencies], meta-updater generally requires a few additional dependencies, depending on your use case and target platform. 
To install these additional packages on Debian/Ubuntu, run this: - -.... -sudo apt install cpu-checker default-jre parted -.... - -To build for https://github.com/advancedtelematic/meta-updater-minnowboard[Minnowboard] with GRUB, you will also need to install https://github.com/tianocore/tianocore.github.io/wiki/OVMF[TianoCore's ovmf] package on your host system. On Debian/Ubuntu, you can do so with this command: - -.... -sudo apt install ovmf -.... - -== Adding meta-updater capabilities to your build - -// MC: TOMERGE: This content mosty duplicates https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/add-ota-functonality-existing-yocto-project.adoc - -If you already have a Yocto-based project and you want to add atomic filesystem updates to it, you just need to do three things: - -1. Clone the `meta-updater` layer and add it to your https://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#structure-build-conf-bblayers.conf[bblayers.conf]. -2. Clone BSP integration layer (`meta-updater-$\{PLATFORM}`, e.g. https://github.com/advancedtelematic/meta-updater-raspberrypi[meta-updater-raspberrypi]) and add it to your `conf/bblayers.conf`. If your board isn't supported yet, you could write a BSP integration for it yourself. See the <> section for the details. -3. Set up your https://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-DISTRO[distro]. If you are using "poky", the default distro in Yocto, you can change it in your `conf/local.conf` to "poky-sota". Alternatively, if you are using your own or third party distro configuration, you can add `INHERIT += " sota"` to it, thus combining capabilities of your distro with meta-updater features. - -You can then build your image as usual, with bitbake. 
After building the root file system, bitbake will then create an https://ostree.readthedocs.io/en/latest/manual/adapting-existing/[OSTree-enabled version] of it, commit it to your local OSTree repo and (optionally) push it to a remote server. Additionally, a live disk image will be created (normally named `$\{IMAGE_NAME}.-sdimg-ota` e.g. `core-image-raspberrypi3.rpi-sdimg-ota`). You can control this behaviour through <>. - -== Build in AGL - -// MC: TOMERGE: This content duplicates https://main.gitlab.in.here.com/olp/edge/ota/documentation/ota-connect-docs/blob/master/docs/getstarted/modules/ROOT/pages/automotive-grade-linux.adoc (except that it is a lot sparser) - -With AGL you can just add agl-sota feature while configuring your build environment: - -.... -source meta-agl/scripts/aglsetup.sh -m porter agl-demo agl-appfw-smack agl-devel agl-sota -.... - -You can then run: - -.... -bitbake agl-demo-platform -.... - -and get as a result an `ostree_repo` folder in your images directory (`tmp/deploy/images/$\{MACHINE}/ostree_repo`). It will contain: - -* your OSTree repository, with the rootfs committed as an OSTree deployment, -* an `ota-ext4` bootstrap image, which is an OSTree physical sysroot as a burnable filesystem image, and optionally -* some machine-dependent live images (e.g. `.wic` for Raspberry Pi or `.porter-sdimg-ota` for Renesas Porter board). - -Although `aglsetup.sh` hooks provide reasonable defaults for SOTA-related variables, you may want to tune some of them. - -== Build problems - -Ubuntu users that encounter an error due to missing `Python.h` should install `libpython2.7-dev` on their host machine. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-dev-config.adoc b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-dev-config.adoc deleted file mode 100644 index 584beb6403..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-dev-config.adoc +++ /dev/null @@ -1,54 +0,0 @@ -= Development configuration -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - -:meta-updater-github-url: https://github.com/advancedtelematic/meta-updater/tree/master - -//MC: The dev guide already has a recommended config topic: https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/recommended-clientconfig.adoc -// This content pretty much serves the same purpose except 'local.conf' instead of 'sota_conf.toml' Clean this up and use an :include: ref reuse in that topic? - -== Logging - -To troubleshoot problems that you might encounter during development, we recommend that you enable persistent `systemd` logging. This setting is enabled by default for newly configured environments (see link:{meta-updater-github-url}/conf/local.conf.sample.append[]). To enable it manually, put this to your `local.conf`: - -.... -IMAGE_INSTALL_append += " systemd-journald-persistent" -.... - -It may also be helpful to run with debug logging enabled in aktualizr. To do so, add this to your `local.conf`: - -.... -IMAGE_INSTALL_append += " aktualizr-log-debug" -.... - -== Custom aktualizr versions - -You can override the version of aktualizr included in your image. This requires that the version you wish to run is pushed to the https://github.com/advancedtelematic/aktualizr[aktualizr github repo]. 
You can then use these settings in your `local.conf` to simplify the development process: - -[options="header"] -|====================== -| Option | Effect -| `require classes/sota_bleeding.inc` | Build the latest head (by default, using the master branch) of Aktualizr -| `BRANCH_pn-aktualizr = "mybranch"` - -`BRANCH_pn-aktualizr-native = "mybranch"` | Build `mybranch` of Aktualizr. Note that both of these need to be set. This is normally used in conjunction with `require classes/sota_bleeding.inc` -| `SRCREV_pn-aktualizr = "1004efa3f86cef90c012b34620992b5762b741e3"` - -`SRCREV_pn-aktualizr-native = "1004efa3f86cef90c012b34620992b5762b741e3"` | Build the specified revision of Aktualizr. Note that both of these need to be set. This can be used in conjunction with `BRANCH_pn-aktualizr` and `BRANCH_pn-aktualizr-native` but will conflict with `require classes/sota_bleeding.inc` -| `TOOLCHAIN_HOST_TASK_append = " nativesdk-cmake "` | Use with `bitbake -c populate_sdk core-image-minimal` to build an SDK. See the https://github.com/advancedtelematic/aktualizr#developing-against-an-openembedded-system[aktualizr repo] for more information. -|====================== - -== Overriding target version -*Warning: overriding target version is a dangerous operation, make sure you understand this section completely before doing it.* - -Every time you build an image with `SOTA_PACKED_CREDENTIALS` set, a new entry in your Uptane metadata is created and you can see it in the OTA Garage UI if you're using one. Normally this version will be equal to OSTree hash of your root file system. If you want it to be different though you can override is using one of two methods: - -1. Set `GARAGE_TARGET_VERSION` variable in your `local.conf`. -2. Write a recipe or a bbclass to write the desired version to `${STAGING_DATADIR_NATIVE}/target_version`. An example of such bbclass can be found in `classes/target_version_example.bbclass`. 
- -Please note that [target name, target version] pairs are expected to be unique in the system. If you build a new target with the same target version as a previously built one, the old package will be overwritten on the update server. It can have unpredictable effect on devices that have this version installed, and it is not guaranteed that information will be reported correctly for such devices or that you will be able to update them (we're doing our best though). The easiest way to avoid problems is to make sure that your overriding version is as unique as an OSTree commit hash. diff --git a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-provisioning-methods.adoc b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-provisioning-methods.adoc deleted file mode 100644 index 9156a795d9..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-provisioning-methods.adoc +++ /dev/null @@ -1,30 +0,0 @@ -= Manual provisioning -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -//MC: TOMERGE: Looks mostly like a duplicate of this topic: https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/simulate-device-cred-provtest.adoc - -As described in the xref:xref:dev@ota-build::sota-variables.adoc[configuration reference], you can set `SOTA_DEPLOY_CREDENTIALS` to `0` to prevent deploying credentials to the built `wic` image. In this case you get a generic image that you can use e.g. on a production line to flash a series of devices. The cost of this approach is that this image is half-baked and should be provisioned before it can connect to the backend. - -Provisioning procedure depends on your provisioning recipe, i.e. 
the value of `SOTA_CLIENT_PROV` (equal to `aktualizr-shared-prov` by default): - -* For `aktualizr-shared-prov` put your `credentials.zip` to `/var/sota/sota_provisioning_credentials.zip` on the filesystem of a running device. If you have the filesystem of our device mounted to your build machine, prefix all paths with `/ostree/deploy/poky` as in `/ostree/deploy/poky/var/sota/sota_provisioning_credentials.zip`. -* For `aktualizr-device-prov` -** put URL to the backend server (together with protocol prefix and port number) at `/var/sota/gateway.url`. If you're using HERE OTA Connect, you can find the URL in the `autoprov.url` file in your credentials archive. -** put client certificate, private key and root CA certificate (for the *server*, not for the *device*) at `/var/sota/import/client.pem`, `/var/sota/import/pkey.pem` and `/var/sota/import/root.crt` respectively. -* For `aktualizr-device-prov-hsm` -** put URL to the server backend (together with protocol prefix and port number) at `/var/sota/gateway.url`. If you're using HERE OTA Connect, you can find the URL in the `autoprov.url` file in your credentials archive. -** put root CA certificate (for the *server*, not for the *device*) at `/var/sota/import/root.crt`. -** put client certificate and private key to slots 1 and 2 of the PKCS#11-compatible device. 
- -For more extensive information on provisioning methods, see the following topics from the OTA Connect Developer guide: - -//MC: Web links because this topic is only viewable in Github -* link:https://docs.ota.here.com/ota-client/dev/client-provisioning-methods.html[Device provisioning methods] -* link:https://docs.ota.here.com/ota-client/dev/enable-device-cred-provisioning.html[Enable device-credential provisioning and install device certificates] diff --git a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-testing.adoc b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-testing.adoc index 63460af6b3..4dc94e57b7 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-testing.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-testing.adoc @@ -1,4 +1,4 @@ -= Testing += Testing meta-updater ifdef::env-github[] [NOTE] @@ -7,12 +7,11 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] - -//MC: No overlap with any content currently in the developer guide, but probably useful content to clean up and include. +This page describes the tools available to test link:https://github.com/advancedtelematic/meta-updater[meta-updater] in an automated fashion. == QA with oe-selftest -This layer relies on the test framework oe-selftest for quality assurance. Currently, you will need to run this in a build directory with `MACHINE` set to `qemux86-64`. Follow the steps below to run the tests: +meta-updater uses the oe-selftest framework for quality assurance. Currently, you will need to run this in a build directory with `MACHINE` set to `qemux86-64`. Follow the steps below to run the tests: 1. Append the line below to `conf/local.conf` to disable the warning about supported operating systems: + @@ -26,9 +25,9 @@ SANITY_TESTED_DISTROS = "" IMAGE_INSTALL_append = " dropbear " ``` -3. Some tests require that `SOTA_PACKED_CREDENTIALS` is set in your `conf/local.conf`. 
See the xref:build-configuration.adoc[SOTA-related variables in local.conf]. +3. Some tests require that `SOTA_PACKED_CREDENTIALS` is set in your `conf/local.conf`. See the xref:build-configuration.adoc[build configuration] page for more details. -4. To be able to build an image for the GRUB tests, you will need to install the `ovmf` package as described in the xref:meta-updater-build.adoc#_dependencies[dependencies section]. +4. To be able to build an image for the GRUB tests, you will need to install the `ovmf` package. This is described in the link:https://github.com/advancedtelematic/meta-updater[meta-updater README]. 5. Run oe-selftest: + diff --git a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-usage.adoc b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-usage.adoc index 2e7c07ef0b..f80f81b989 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/meta-updater-usage.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/meta-updater-usage.adoc @@ -1,4 +1,4 @@ -= Usage += Advanced usage of meta-updater ifdef::env-github[] [NOTE] @@ -8,76 +8,44 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] :meta-updater-github-url: https://github.com/advancedtelematic/meta-updater/tree/master -:metadata-expiry-article: xref:dev@ota-client::metadata-expiry.adoc[OTA Connect documentation] -ifdef::env-github[:metadata-expiry-article: link:https://docs.ota.here.com/ota-client/dev/metadata-expiry.html[OTA Connect documentation]] -//MC: No overlap with any content currently in the developer guide, but probably useful content to clean up and include. eg: use cases. +This page describes advanced features of meta-updater. -== OSTree - -OSTree used to include a simple HTTP server as part of the ostree binary, but this has been removed in more recent versions. However, OSTree repositories are self-contained directories, and can be trivially served over the network using any HTTP server. 
For example, you could use Python's SimpleHTTPServer: - -.... -cd tmp/deploy/images/qemux86-64/ostree_repo -python -m SimpleHTTPServer # port defaults to 8000 -.... - -You can then run ostree from inside your device by adding your repo: - -.... -# This behaves like adding a Git remote; you can name it anything -ostree remote add --no-gpg-verify my-remote http://: - -# If OSTREE_BRANCHNAME is set in local.conf, that will be the name of the -# branch. If not set, it defaults to the value of MACHINE (e.g. qemux86-64). -ostree pull my-remote - -# poky is the OS name as set in OSTREE_OSNAME -ostree admin deploy --os=poky my-remote: -.... - -After restarting, you will boot into the newly deployed OS image. - -For example, on the raspberry pi you can try this sequence: - -.... -# add remote -ostree remote add --no-gpg-verify agl-snapshot https://download.automotivelinux.org/AGL/snapshots/master/latest/raspberrypi3/deploy/images/raspberrypi3/ostree_repo/ agl-ota - -# pull -ostree pull agl-snapshot agl-ota +== garage-push -# deploy -ostree admin deploy --os=agl agl-snapshot:agl-ota -.... +The https://github.com/advancedtelematic/aktualizr[aktualizr repo] contains a tool, garage-push, which lets you push the changes in OSTree repository generated by bitbake process. It communicates with an http server capable of querying files with HEAD requests and uploading them with POST requests. -== garage-push +This tool is used automatically as part of every normal Yocto build in our xref:getstarted::get-started.adoc[quick start tutorials], but can be used manually or used to communicate with another server if desired. -The https://github.com/advancedtelematic/aktualizr[aktualizr repo] contains a tool, garage-push, which lets you push the changes in OSTree repository generated by bitbake process. It communicates with an http server capable of querying files with HEAD requests and uploading them with POST requests. 
In particular, this can be used with https://connect.ota.here.com/[HERE OTA Connect]. garage-push is used as follows: +If you set `SOTA_PACKED_CREDENTIALS` in your `local.conf`, bitbake will automatically synchronize your build results with the remote server specified in your credentials file. (See the xref:provisioning-methods-and-credentialszip.adoc[provisioning method and credentials.zip] page for more information.) garage-push can also be used on the command line as follows: .... garage-push --repo=/path/to/ostree-repo --ref=mybranch --credentials=/path/to/credentials.zip .... -You can set `SOTA_PACKED_CREDENTIALS` in your `local.conf` to automatically synchronize your build results with a remote server. Credentials are stored in an archive as described in the xref:dev@ota-client::provisioning-methods-and-credentialszip.adoc[aktualizr documentation]. +== garage-sign + +meta-updater also uses a tool, https://github.com/advancedtelematic/ota-tuf/tree/master/cli[garage-sign], for signing Uptane metadata and communicating with a remote server. It can be configured with variables described in the xref:build-configuration.adoc[build configuration] section. Of particular importance is controlling the expiration of the Targets metadata, which is described in detail in the xref:metadata-expiry.adoc[metadata expiry] page. garage-sign can also be used to xref:rotating-signing-keys.adoc[rotate your signing keys]. == aktualizr configuration -https://github.com/advancedtelematic/aktualizr[Aktualizr] supports a variety of xref:dev@ota-client::aktualizr-config-options.adoc[configuration options via a configuration file and the command line]. There are two primary ways to control aktualizr's configuration from meta-updater. +https://github.com/advancedtelematic/aktualizr[Aktualizr] supports a variety of xref:aktualizr-config-options.adoc[configuration options via a configuration file and the command line]. 
There are two primary ways to control aktualizr's configuration from meta-updater. -First, you can set `SOTA_CLIENT_PROV` to control which provisioning recipe is used. Each recipe installs an appropriate `sota.toml` file from aktualizr according to the provisioning needs. See the xref:build-configuration.adoc[SOTA-related variables in local.conf] section for more information. +First, you can set `SOTA_CLIENT_PROV` to control which provisioning recipe is used. Each recipe installs an appropriate `sota.toml` file from aktualizr according to the provisioning needs. See the xref:build-configuration.adoc[build configuration] section for more information. -Second, you can write recipes to install additional config files with customized options. A few recipes already exist to address common needs and provide an example: +Second, you can write recipes to install additional config files with customized options. A few recipes already exist as examples and to address common needs: * link:{meta-updater-github-url}/recipes-sota/config/aktualizr-auto-reboot.bb[aktualizr-auto-reboot.bb] configures aktualizr to automatically reboot after new updates are installed in order to apply the updates immediately. This is only relevant for package managers (such as OSTree) that require a reboot to complete the installation process. If this is not enabled, you will need to reboot the system through other means. * link:{meta-updater-github-url}/recipes-sota/config/aktualizr-disable-send-ip.bb[aktualizr-disable-send-ip.bb] disables the reporting of networking information to the server. This is enabled by default and supported by https://connect.ota.here.com/[HERE OTA Connect]. However, if you are using a different server that does not support this feature, you may want to disable it in aktualizr. * link:{meta-updater-github-url}/recipes-sota/config/aktualizr-log-debug.bb[aktualizr-log-debug.bb] sets the log level of aktualizr to 0 (trace). The default is 2 (info). 
This recipe is intended for development and debugging purposes. +* link:{meta-updater-github-url}/recipes-sota/config/aktualizr-polling-interval.bb[aktualizr-polling-interval.bb] sets the polling interval of aktualizr to the value of `SOTA_POLLING_SEC`. See the xref:build-configuration.adoc[build configuration] and xref:recommended-clientconfig.adoc[recommended configuration] sections for more information. +* link:{meta-updater-github-url}/recipes-sota/config/aktualizr-virtualsec.bb[aktualizr-virtualsec.bb] creates a virtual Secondary on the Primary. This can be used for testing purposes or to update a file on the Primary outside of OSTree. To use these recipes, you will need to add them to your image with a line such as `IMAGE_INSTALL_append = " aktualizr-log-debug "` in your `local.conf`. == aktualizr service resource control -With systemd based images, it is possible to set resource policies for the aktualizr service. The main use case is to provide a safeguard against resource exhaustion during an unforeseen failure scenario. +With systemd-based images, it is possible to set resource policies for the aktualizr service. The main use case is to provide a safeguard against resource exhaustion during an unforeseen failure scenario. To enable it, install `aktualizr-resource-control` on the target image and optionally override the default resource limits set in link:{meta-updater-github-url}/recipes-sota/aktualizr/aktualizr_git.bb[aktualizr_git.bb], from your `local.conf`. @@ -88,8 +56,3 @@ IMAGE_INSTALL_append += " aktualizr-resource-control " RESOURCE_CPU_WEIGHT_pn-aktualizr = "50" .... -=== garage-sign configuration - -The https://github.com/advancedtelematic/ota-tuf/tree/master/cli[garage-sign] tool can be configured with variables described in the xref:build-configuration.adoc[SOTA-related variables in local.conf] section. - -Of particular importance is controlling the expiration of the Targets metadata signed with garage-sign. 
This is described in detail in the {metadata-expiry-article}. To set a manual expiration date, you can use either of the variables `GARAGE_TARGET_EXPIRES` or `GARAGE_TARGET_EXPIRE_AFTER`. Both cannot be supplied simultaneously. If neither are provided, a default of one month will be used. \ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/metadata-expiry.adoc b/docs/ota-client-guide/modules/ROOT/pages/metadata-expiry.adoc index 7493384c7d..97a33f8567 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/metadata-expiry.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/metadata-expiry.adoc @@ -7,20 +7,19 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] - -Once you xref:rotating-signing-keys.adoc[take the keys for signing metadata offline], you need to be aware of when this metadata expires. You need to refresh the expiry date before it is reached, otherwise you won't be able to push updates. You can also define your own expiry dates when you take your keys offline. +If you xref:rotating-signing-keys.adoc[take the keys for signing metadata offline], you need to refresh the metadata expiry date before it is reached. Otherwise, you won't be able to push updates. You can also define your own expiry dates when you take your keys offline. The default expiry dates are as follows: -* For `targets.json`, the expiry date is **31 days** from when the metadata was last updated. -* For `root.json`, the expiry date is **365 days** from when the metadata was last updated. +* For `targets.json`, the expiry date is **31 days** from the last time you created or modified metadata. +* For `root.json`, the expiry date is **365 days** from the last time you created or modified metadata. 
== Define your own expiry dates -How you define the expiry dates depends on how you use OTA Connect: +Depending on how you use OTA Connect, set the expiry dates in one of the following ways: -* If you're building disk images, you need to update your *build configuration*. -* If you're using the standalone command-line tools such as xref:install-garage-sign-deploy.adoc[`garage-deploy`], you need to add extra *command-line arguments*. +* If you build software images using Yocto and our `meta-updater` layer, update your *build configuration*. +* If you use the standalone command-line tools, add extra *command-line arguments*. [{tabs}] @@ -59,7 +58,9 @@ GARAGE_TARGET_EXPIRE_AFTER = "1Y3M5D" Command-line arguments:: + -- -When you're using the `garage-sign` command to take your keys offline, you can also sign your metadata with one of the following expiry arguments. +If you use the `garage-sign` command to take your keys offline, you can also sign your metadata with one of the following expiry arguments. + +For more information, see the `garage-sign` xref:garage-sign-reference.adoc[reference] documentation. 
.Command-line arguments for metadata expiry [cols="2a,4a",options="header"] | | | include::partial$config-descriptions.adoc[tags=metadata-expires] +[source, bash] ---- garage-sign targets sign --expires 2018-01-01T00:01:00Z --repo myimagerepo --key-name mytargets ---- @@ -78,6 +80,8 @@ garage-sign targets sign --expires 2018-01-01T00:01:00Z --repo myimagerepo --ke `--expire-after` | include::partial$config-descriptions.adoc[tags=metadata-expireafter] + +[source, bash] ---- garage-sign targets sign --expire-after 1Y3M5D --repo myimagerepo --key-name mytargets ---- diff --git a/docs/ota-client-guide/modules/ROOT/pages/ostree-and-treehub.adoc b/docs/ota-client-guide/modules/ROOT/pages/ostree-and-treehub.adoc index b37838f00c..dc3f2e9abb 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/ostree-and-treehub.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/ostree-and-treehub.adoc @@ -1,4 +1,4 @@ -== OSTree += OSTree and TreeHub ifdef::env-github[] [NOTE] @@ -7,10 +7,12 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +== OSTree + -link:http://ostree.readthedocs.io/en/latest/[OSTree] is an open-source tool that combines a "git-like" model for committing and downloading bootable filesystem trees, along with a layer for deploying them and managing the bootloader configuration. It is actively developed and support by Red Hat, and used in link:http://flatpak.org/[flatpak] and link:http://www.projectatomic.io/[Project Atomic]. +link:http://ostree.readthedocs.io/en/latest/[OSTree] is an open-source tool that combines a "git-like" model for committing and downloading bootable filesystem trees, along with a layer for deploying them and managing the bootloader configuration. It is actively developed and supported by Red Hat, and used in link:http://flatpak.org/[flatpak] and link:https://github.com/projectatomic/[Project Atomic]. 
-For more on why OSTree is the best tool for the job of doing embedded device updates, you can also jump straight to xref:ostree-and-treehub.adoc#_comparing_full_filesystem_update_strategies[Comparing full-filesystem update strategies]. +For more on why OSTree is the best tool for the job of doing embedded device updates, you can also jump straight to xref:comparing-full-filesystem-update-strategies.adoc[Comparing full-filesystem update strategies]. .Doing It Wrong™: Bad choices for embedded updates **** @@ -21,39 +23,10 @@ Hey, every Linux distro does it this way--why don't I just use a dpkg/rpm/apk-ba The problem with this approach is that updates aren't guaranteed to be atomic, so it's quite easy to get the system into a state that requires user intervention to fix, especially if it's rebooted during an update. That might be fine on the desktop or on a server where there's a reasonable expectation that a user could intervene, but it doesn't work for embedded devices. * "Update Mode" and similar designs + -The idea here is that you boot into a mode that allows the root filesystem to be overwritten, either via a pre-downloaded image or via something streamed over the network. Fortunately, we rarely see this design in the wild anymore, with the occasional exception of industrial control systems where the update process is closely supervised. When we do see it, it's typically a legacy of days when microcontroller flashing would be done in person, with a new image streamed over a serial interface to overwrite the old system. This is a very poor choice for embedded, because of the risk that users may disconnect the device while the new image is being flashed, potentially bricking the device or requiring a more difficult intervention to fix. 
Lower-end home routers and DSL modems sometimes go this route; doing a quick Google search for "link:https://www.google.com/search?q=firmware+update+bricked+router[firmware update bricked router]" should show why this is a bad idea. +The idea here is that you boot into a mode that allows the root filesystem to be overwritten, either via a pre-downloaded image or via something streamed over the network. Fortunately, we rarely see this design in the wild anymore, unless it's coupled with another, specialized device like a diagnostics gateway or flashing unit inside a vehicle that implements complex recovery and rollback logic. Without such mitigations, though, this is a very poor choice for embedded, because of the risk that users may disconnect the device while the new image is being flashed. That could potentially brick the device, or require an in-person service call to fix. Lower-end home routers and DSL modems sometimes still choose this method; doing a quick Google search for "link:https://www.google.com/search?q=firmware+update+bricked+router[firmware update bricked router]" should show why this is a bad idea. **** == TreeHub Since OSTree is "git-like", you can probably imagine that you can have remote repositories. TreeHub is exactly that. It's seamlessly integrated into the meta-updater layer and into the {product-name} site itself. Your builds get automatically pushed to TreeHub as soon as you make them, and you can use {product-name-short} to wirelessly update your devices--one at a time, or in targeted campaigns. You can even set certain devices to automatically pull updates from TreeHub as soon as they're pushed, and stop wasting time re-flashing the units on your test bench every time you build new code. -== Comparing full-filesystem update strategies - -OSTree provides a number of very significant technological advantages over other full-filesystem updating schemes. 
For embedded systems that need a solution for safe, atomic, full-filesystem updates, the usual approach is to have some kind of *dual-bank* scheme. Here, we're going to take a look at the difference between OSTree and dual-bank systems, and the advantages OSTree can provide. - -=== Dual-bank - -In a dual-bank system, the read-only root filesystem is kept on a different partition from the writable user space, so that when an update is needed the whole partition can be overwritten. For atomicity and safety, this read-only partition is duplicated: there are two complete copies of the filesystem, kept on different partitions, and the active partition can be selected at boot time. - -When the system needs to be updated, the new filesystem image is written to the inactive partition, and the next time the system reboots, that partition becomes the active one. - -.Dual-bank update process (click to enlarge) -[caption="Figure 1: ",link={attachmentsdir}/dual-bank-system-update-flow.svg] -image::dual-bank-system-update-flow.svg[] - -The main advantage of this update model is its safety. Updates are always strictly atomic, and there is always a known good image that can be rolled back to. However, there are significant trade-offs in flexibility and materials costs that must be made: the size of the root partition must be chosen when the system is flashed for the very first time, and the duplication of the root partition doubles the space required. When choosing how big to make the root partition, a device manufacturer has to consider not just how big their filesystem image currently is, but also must estimate and plan for the size of all future updates. If the size chosen is too small, it may restrict the ability to add new features. Making it larger, of course, adds to the bill of goods for the product--and since it's duplicated, every extra megabyte of future capacity actually costs two megabytes to accommodate. 
- -=== OSTree - -OSTree checksums individual files and stores them as content-addressed objects, much like git. The read-only filesystem is built by "checking out" a particular revision, and hardlinking the content-addressed objects into the actual Linux directory structure. Multiple filesystem versions can be stored, and any content that is duplicated across versions is only stored once. A complete history of all versions is stored in TreeHub, but it is not required to store that complete revision history on the device. Only one partition is needed--writable user space can be on the same partition as the OSTree content store. - -When the system needs to be updated, {product-name} sends a small metadata file with a particular commit identifier. The client pulls that commit from TreeHub, only downloading the new files, and only downloading binary diffs of changed files. Once the pull is complete and verified, the system is instructed to boot into the new version the next time it starts up. - -.OSTree update process (click to enlarge) -[caption="Figure 2: ",link={attachmentsdir}/ostree-update-flow.svg] -image::ostree-update-flow.svg[] - -With OSTree, you no longer need to guess how much room you might need in the future to expand your system; the OSTree content store expands and contracts as needed. You also save a significant amount of space, since only diffs between versions need to be stored. OSTree also allows you to garbage-collect old images: if you upgrade 1.0 -> 1.1 -> 1.2, for example, by default the {product-name-short} client will garbage-collect all local objects unique to 1.0. If you decided later on that you in fact did want to go back to v1.0, you still could: if you pushed v1.0 from {product-name-short}, the client would download only the diff from TreeHub, repopulate the local object store, and then reboot into that version. 
Of course, it's also possible to configure OSTree to keep more than two revisions on the local disk; this can be particularly useful in QA workflows, allowing for rapid testing of a feature or an external integration against multiple different firmware versions. - -Best yet, you get all of these benefits *without having to give up the safety of a dual-bank setup*. Updates are still strictly atomic; if power is lost during the download of an update, the client will still boot into the old system when it starts up next, and will simply resume the download it had begun. You still always have a known good image on the system to roll back to; in fact, as stated above, you can keep an arbitrarily large number of revisions--an impossibility in a dual-bank system. diff --git a/docs/ota-client-guide/modules/ROOT/pages/ostree-usage.adoc b/docs/ota-client-guide/modules/ROOT/pages/ostree-usage.adoc index cff4dc35f4..05ca7aeec6 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/ostree-usage.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/ostree-usage.adoc @@ -13,32 +13,51 @@ endif::[] :page-order: 3 :icons: font -Sometimes, while troubleshooting, it might be helpful to see and manipulate your local OSTree repos. You can do that using the copy of ostree that bitbake builds. For the rest of these commands, we'll assume that you've exported the executable as `$OSTREE`, and the location of your local repo as `$REPO` (for example, like so): +Sometimes, while troubleshooting, it might be helpful to see and manipulate your local OSTree repos. You can do that using the copy of `ostree` that bitbake builds. 
For the rest of these commands, we'll assume that you've exported the executable as `$OSTREE`, and the location of your local repo as `$REPO` (for example, like so): export OSTREE=$(pwd)/tmp/sysroots/x86_64-linux/usr/bin/ostree export REPO=$(pwd)/tmp/deploy/images/raspberrypi3/ostree_repo/ -=== Get a list of all branches in the repo +== Get a list of all branches in the repo You'll need a branch name for most other commands, so it's often useful to check your list of branches: $OSTREE refs --repo $REPO -The branch name defaults to {MACHINE}-ota, so if you were building for raspberrypi3, your branch name would be raspberrypi3-ota by default. However, you can set the branch name for your build in local.conf using the *OSTREE_BRANCHNAME* configuration option, letting you keep your different builds, projects, or branches under different names. +The branch name defaults to \{MACHINE}, so if you were building for raspberrypi3, your branch name would be raspberrypi3 by default. However, you can set the branch name for your build in `local.conf` using the `OSTREE_BRANCHNAME` configuration option, letting you keep your different builds, projects, or branches under different names. See the xref:build-configuration.adoc[build configuration article] for more information. -=== Show the log for a particular branch +== Show the log for a particular branch $OSTREE log --repo $REPO [branchname] -=== List files in a particular commit +== List files in a particular commit $OSTREE ls --repo $REPO [commit] [path] The -R option is supported, for recursive file listing. -=== See a diff between two commits or references +== See a diff between two commits or references $OSTREE diff --repo $REPO [ref1] [ref2] Refs can be branch names or commit hashes, much like in git. Note that this does not show the contents of the diff; it just shows files added, deleted, and modified. 
+== Pruning unused/unwanted objects + +OSTree normally manages garbage collection of objects on its own, but if your OSTree repo in your build directory is too large, you can manually remove unwanted commits and objects. You can also use the `ostree` tool on your device to remove commits and objects. This may be useful if you know that a certain commit has a flaw that you do not want to let get deployed. + +First, you will need to find which commits and refs are currently not deployed and are no longer needed. You can use these commands to help make that determination: + + ostree admin status + ostree refs + ostree log + +Then, for each ref that you would like to remove: + + ostree refs --delete <ref> + +And then for each commit that you would like to remove: + + ostree prune --delete-commit=<commit> + +Note that `ostree admin status` will still show deleted commits, but if you try to deploy a deleted commit, the operation will fail as expected. diff --git a/docs/ota-client-guide/modules/ROOT/pages/pki.adoc b/docs/ota-client-guide/modules/ROOT/pages/pki.adoc index 5f5d944cac..541633421e 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/pki.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/pki.adoc @@ -8,30 +8,27 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -Once you move to production, we recommend that you manage these keys offline in your own PKI rather than having all keys managed on the OTA Connect server. +OTA Connect uses public key cryptography to protect sensitive data. Once you move to production, we recommend that you manage these keys offline rather than having all keys managed on the OTA Connect server. -OTA Connect uses pairs of public and private keys to protect sensitive data. Normally, you use a PKI (Public Key Infrastructure) to manage these keys. 
+== Risks keeping your keys on the OTA Connect server -== Risks of using OTA Connect as your PKI +When you first sign up for an account, OTA Connect generates all of the keys you need and securely stores them, so you don't have to worry about key management to get started. -By default, OTA Connect server plays the role of a PKI so you don't have to think about key management. This is useful if you don't yet have your own PKI, but not so secure. +However, this means that your OTA Connect account password is the only thing standing between an attacker and your devices. If your password were guessed or stolen, the attacker would be able to install whatever software they wanted on any of the devices OTA Connect manages. If your device happens to be a vehicle, such a breach could have very dangerous consequences. -If an attacker were able to take over your OTA Connect account, they would be able to provision their own devices and send malicious updates to your devices. If your device happens to be a vehicle, such a breach could have very dangerous consequences. - -This is why we recommend that you use your own PKI in production. +This is why we recommend xref:rotating-signing-keys.adoc[rotating your software signing keys offline] and xref:client-provisioning-methods.adoc[choosing an appropriately secure provisioning method]. == Key Types -If you follow our security recommendations, you'll need to manage several different keys. +If you follow our highest security recommendations, you'll need to manage several different keys. .Key Types [width="100%",cols="2,2,4",options="header"] |==================== -| Key Name | Purpose | Description -| Fleet Root | Device Identity | This key is used to sign your fleet root certificate. The root certificate certifies the identity of your fleet and is used to sign device certificates. The OTA Connect server can then validate device certificates to ensure that a connecting device is part of your fleet. 
+| Key Name | Purpose | Description +| Fleet Root | Device Identity | The private key of your Fleet Root CA. This is used to sign the individual device certificates for your fleet of devices. The OTA Connect server can then validate device certificates to ensure that a connecting device is part of your fleet. See xref:client-provisioning-methods.adoc[Device Provisioning Methods] for more details. -If you obtain a root certificate from an external certificate authority such as DigiCert, you don't have to worry about managing this key. The certificate authority takes are of this for you. | Uptane Root | Software Integrity | This key is used to sign the "root" metadata file for your software repository. This file contains information about all the roles that can sign software metadata. For more information on how to take these keys offline, see the topic "xref:rotating-signing-keys.adoc[Manage keys for software metadata]". | Uptane Targets | Software Integrity | This key is used to sign the "targets" metadata file for software updates. This file contains information about all the valid software files in your software repository. For more information on how to take these keys offline, see the topic "xref:rotating-signing-keys.adoc[Manage keys for software metadata]". 
|==================== diff --git a/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries-bitbaking.adoc b/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries-bitbaking.adoc index 83c62f77ce..fa9c4cdedd 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries-bitbaking.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries-bitbaking.adoc @@ -9,21 +9,19 @@ endif::[] :build-qemu-link: xref:build-qemu.adoc[How to build 'core-image-minimal' image for QEMU]] -The goal of this doc is to guide a reader on bitbaking of two type of images `primary` and `secondary` that are targeted for QEMU or RPi +The goal of this doc is to guide a reader on bitbaking of two type of images, `primary` and `secondary`, that are targeted for QEMU or Raspberry Pi and running of which on the target makes it act as `Primary` and `Secondary` ECU of a single device. Refer to link:https://uptane.github.io/uptane-standard/uptane-standard.html[the Uptane standard] in order to grok the meaning of the `Primary` and `Secondary` terms in the given context from a theoretical standpoint. -Check out xref:posix-secondaries.doc[this doc] to understand these terms from a practical standpoint and -to learn how `Primary` and `Secondary` can be emulated locally on an user's host. +Check out xref:posix-secondaries.adoc[this doc] to understand these terms from a practical standpoint and to learn how `Primary` and `Secondary` can be emulated locally on an user's host. It is highly advisable to follow the doc instructions and play with emulated `Primary` and `Secondary` prior to running steps described further in this doc. == Bitbaking -It is assumed that a reader is familiar with Yocto and bitbaking in general as well as bitbaking of Aktualizr images in particular, -details of which are out of scope of this doc and can be found in the following guides: +It is assumed that the reader is familiar with Yocto, bitbaking, and meta-updater. 
If not, please consult the following links: -* link:https://github.com/advancedtelematic/meta-updater/blob/master/README.adoc[meta-updater README]. -* +* link:https://github.com/advancedtelematic/meta-updater/blob/master/README.adoc[meta-updater README] +* xref:build-qemu.adoc[How to build 'core-image-minimal' image for QEMU] === Primary To bitbake an image for `Primary` run the following: @@ -31,9 +29,8 @@ To bitbake an image for `Primary` run the following: bitbake primary-image .... - .Primary configuration variables -[cols="1,1,10"] +[%autowidth.stretch] |=== |Name |Default |Description @@ -43,29 +40,29 @@ bitbake primary-image |`PRIMARY_PORT` |`"9040"` -| A TCP port that Primary aktualizr listen on for connections from Secondaries +| A TCP port that Primary aktualizr listens on for connections from Secondaries |`PRIMARY_WAIT_TIMEOUT` |`"120"` -|Time (seconds) to wait for connections from Secondaries. Only the secondaries that connected to Primary will be registered at the server and are part of the device Primary represents. +|Time (seconds) to wait for connections from Secondaries. Note that, while a Primary can still function if some Secondaries failed to connect, it will only provision if all its Secondaries have connected before this delay. |`PRIMARY_SECONDARIES` |`"10.0.3.2:9050"` | A space separated list of TCP/IP addresses of the Secondaries to be included into the list of ECUs served by the given Primary |=== -Note that PRIMARY_SECONDARIES can be a list of TCP/IP addresses in order to fulfill multiple secondaries use case. +Note that PRIMARY_SECONDARIES can be a list of TCP/IP addresses in order to fulfill multiple Secondaries use case. For example, `PRIMARY_SECONDARIES = "10.0.3.2:9050 10.0.3.3:9050 10.0.3.4:9050"`. === Secondary -To bitbake an image for `Secondary` run the following +To bitbake an image for `Secondary` run the following: .... bitbake secondary-image .... 
.Secondary configuration variables -[cols="1,1,10"] +[%autowidth.stretch] |=== |Name |Default |Description @@ -76,17 +73,23 @@ bitbake secondary-image |`SECONDARY_PORT` |`"9050"` |A TCP port that Secondary listen on for connections from Primary + +|`SECONDARY_UPDATE_TYPE` +|`"ostree"` +|An update type to configure for Secondary, supported types: ostree, file |=== -==== Multiple secondaries use case -In order to support multiple secondaries use case an user should +Secondary images will also take advantage of some common configuration snippets described in xref:recommended-clientconfig.adoc[recommended client configurations], such as auto-reboot, persistent `journald` and debug logs. + +==== Multiple Secondaries use case +In order to support multiple Secondaries use case a user should: -* repeat the secondary bitbaking procedure corresponding number of times, each time +* repeat the Secondary bitbaking procedure corresponding number of times, each time ** specifying unique TCP/IP address by means of `SECONDARY_IP` and `SECONDARY_PORT` configuration variables ** copying and naming uniquely the resultant image file (e.g. `cp tmp/deploy/images/qemux86-64/secondary-image-qemux86-64.ota-ext4 secondary-images/secondary-image-qemux86-64.ota-ext4-001`) -* bitbake the primary image with `PRIMARY_SECONDARIES` listing the corresponding secondaries TCP/IP addresses -* run the primary by following the guide in <> -* run the secondaries by running the command specified in <> with a parameter pointing to corresponding secondary image. +* bitbake the Primary image with `PRIMARY_SECONDARIES` listing the corresponding Secondaries TCP/IP addresses +* run the Primary by following the guide in <> +* run the Secondaries by running the command specified in <> with a parameter pointing to corresponding Secondary image. 
For example, `../meta-updater/scripts/run-qemu-ota --no-gui --secondary-network secondary-images/secondary-image-qemux86-64.ota-ext4-001` @@ -97,7 +100,7 @@ For example, It is assumed that a reader is a familiar with bitbaking for Raspberry Pi in general, see xref:build-raspberry.adoc[Build a Raspberry Pi image]. -The aforementioned/above guide is relevant and applicable to building a Raspberry images of Primay and Secondary. +The aforementioned/above guide is relevant and applicable to building Raspberry Pi images for Primary and Secondary ECUs. The following is specifics of building such images targeting RPi. * run `source meta-updater/scripts/envsetup.sh raspberrypi3` to get the build environment set up from a root of the yocto project (updater-repo) @@ -105,7 +108,7 @@ The following is specifics of building such images targeting RPi. By default ethernet NIC is used which implies that Raspberry Pi is connected to LAN with an access to Internet. To use WiFi NIC the following configuration variables should be defined in your local configuration (local.conf): .WiFi configuration variables -[cols="1,7,10"] +[%autowidth.stretch] |=== |Name |Default |Description @@ -123,26 +126,26 @@ By default ethernet NIC is used which implies that Raspberry Pi is connected to |=== -==== RPi networking details in a context of Posix secondaries support +==== RPi networking details in a context of Posix Secondaries support -IP/Posix secondaries support implies that a single primary ECU connected to two IP networks: +IP/Posix Secondaries support implies that a single Primary ECU connected to two IP networks: * an IP network with an access to Internet for communication with the OTA backend; -* an IP network that does not have access to Internet for communication with secondary ECU(s). The secondaries should be connected to this internal network. +* an IP network that does not have access to Internet for communication with Secondary ECU(s). 
The Secondaries should be connected to this internal network. Taking into account that RPi has two NICs, ethernet and wifi the aforementioned requirements to networking can be fulfilled by applying the following approaches. ===== Primary uses multihomed ethernet interface, Secondary uses ethernet interface -Both primary and secondary ECUs has wifi turned off and are connected to the same LAN (via switch or router) that has an access to Internet. +Both Primary and Secondary ECUs have wifi turned off and are connected to the same LAN (via switch or router) that has an access to Internet. Primary only network interface is configured in such way that it has two IP addresses assigned to it. The first one (10.0.3.1 by default) is statically defined (can be configured via `PRIMARY_IP` configuration variable) -and connects the primary to an internal IP network (10.0.3.0/8) that is aimed for communication with secondary ECU(s). +and connects the Primary to an internal IP network (10.0.3.0/8) that is aimed for communication with Secondary ECU(s). Secondary(ies) are connected to the same internal network (10.0.3.0/8) by assigning to their -ethernet interface corresponding IP addresses (10.0.3.x, 10.0.3.2 for the default only secondary, `SECONDARY_IP` configuration variable). +ethernet interface corresponding IP addresses (10.0.3.x, 10.0.3.2 for the default only Secondary, `SECONDARY_IP` configuration variable). -The second IP address assigned to the primary ethernet NIC should be obtained from a DHCP server running on one of +The second IP address assigned to the Primary ethernet NIC should be obtained from a DHCP server running on one of the devices (usually a router) that is connected to the given LAN, has an access to Internet and provides each host connected to the given IP network with access to Internet (via NATing IP packets, DHCP and NAT server can be hosted/running on different devices). @@ -151,39 +154,39 @@ The given networking option is enabled by default. 
===== Primary uses both wifi and ethernet interfaces, Secondary uses ethernet interface Primary has wifi on, and its wifi NIC is connected to a LAN with an access to Internet. Also, Primary ethernet NIC is assigned with an only IP address (10.0.3.1 by default) to connect to the internal network for communication -with secondary ECUs. +with Secondary ECUs. Secondary(ies) are connected to the same internal network (10.0.3.0/8) by assigning to their -ethernet interface corresponding IP addresses (10.0.3.x, 10.0.3.2 for the default only secondary, `SECONDARY_IP` configuration variable). +ethernet interface corresponding IP addresses (10.0.3.x, 10.0.3.2 for the default only Secondary, `SECONDARY_IP` configuration variable). ===== Primary and Secondary uses wifi, only Primary uses ethernet NIC In this case, both Primary and Secondary(ies) uses wifi NIC to connect to the internal network (wifi router should not have an Internet access). Secondary doesn't use ethernet NIC. Primary connects to Internet via ethernet NIC that should be connected to LAN with an access to Internet. -`(The given approach is not supported by the meta-updater but can be applied by an advanced user)` +(This approach is not supported by meta-updater but can be applied by an advanced user.) == Running It is assumed that a reader is familiar with details on running of bitbaked images targeted for QEMU, such information can be found in the following docs: -* link:https://github.com/advancedtelematic/meta-updater/blob/master/README.adoc[meta-updater README]. -* xref:build-qemu.adoc[How to build 'core-image-minimal' image for QEMU]] +* link:https://github.com/advancedtelematic/meta-updater/blob/master/README.adoc[meta-updater README] +* xref:build-qemu.adoc[How to build 'core-image-minimal' image for QEMU] === Primary -To launch QEMU VM acting as Primary run the following from your build directory +To launch QEMU VM acting as Primary run the following from your build directory: .... 
../meta-updater/scripts/run-qemu-ota --no-gui --secondary-network primary-image .... -`--secondary-network` option instructs QEMU to add NIC to the VM in order to communicate with Secondary VM(s) via it. +The `--secondary-network` option instructs QEMU to add NIC to the VM in order to communicate with Secondary VM(s) via it. === Secondary -To launch QEMU VM acting as Secondary run the following from your build directory +To launch QEMU VM acting as Secondary run the following from your build directory: .... ../meta-updater/scripts/run-qemu-ota --no-gui --secondary-network secondary-image .... -`--secondary-network` option instructs QEMU to add NIC to the VM aimed for communication with Primary +The `--secondary-network` option instructs QEMU to add NIC to the VM aimed for communication with Primary. == Usage @@ -191,7 +194,7 @@ Once both Primary and Secondary VMs are running you should see that a new device The following are Tips & Tricks for using & troubleshooting of the Primary and Secondary VMs. * run `journalctl -f -u aktualizr` to see logs that are being output by aktualizr running on `Primary` VM; -* run `journalctl -f -u aktualizr-secondary` to see logs that are being output by aktualizr-secondary (posix/IP secondary) running on `Secondary` VM; +* run `journalctl -f -u aktualizr-secondary` to see logs that are being output by aktualizr-secondary (POSIX/IP Secondary) running on `Secondary` VM; * By default, both aktualizr and aktualizr-secondary are running as systemd services. Use `systemctl stop|start|restart ` to control aktualizr and aktualizr-secondary daemons/services managed by systemd; * To control aktualizr|aktualizr-secondary manually stop corresponding systemd service (see above) and run it from command line: just type `aktualizr' | `aktualizr-secondary`; @@ -201,8 +204,8 @@ In case of running as a systemd service add corresponding configuration fragment e.g. 
`echo -e "[logger]\nloglevel = 0" > /etc/sota/conf.d/50-debug-logs.toml` and restart the service; * In order to trigger a device re-provisioning, please, remove the DB file on Primary, i.e. `rm /var/sota/sql.db` * If the DB file is removed on Secondary then the device should be re-provisioned (see above), -otherwise Primary/aktualizr will refuse to work with a 'new' secondary as it will have a "new" autogenerated ECU serial +otherwise Primary/aktualizr will refuse to work with a 'new' Secondary as it will have a "new" autogenerated ECU serial that doesn't the one already been registered on Primary. -* OTA Connect does not support adding/removing secondary ECUs to a device that has been already registered. -Thus adding a new ECU to the list of secondaries on Primary won't take much effect, +* OTA Connect does not support adding/removing Secondary ECUs to a device that has been already registered. +Thus adding a new ECU to the list of Secondaries on Primary won't take much effect, the new ECU won't appear on the UI and it will be listed as not registered by aktualizr-info. diff --git a/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries.adoc b/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries.adoc index 066b6bc169..a82bc914c5 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/posix-secondaries.adoc @@ -16,15 +16,15 @@ It refers to ECU, software/firmware/package updates of which are managed by anot A key component of this repository is libaktualizr which is a library implementing Uptane protocol and providing customers and community with capability to build application(s) one of the purposes of which is to update software/firmware/packages in compliance with the Uptane standard. In addition to libaktualizr, a user can find in this repo another two components: -. 
*aktualizr*, aka aktualizr primary - a reference implementation of an application utilizing libaktualizr, its binary/executable is called aktualizr. Aktualizr can be used to emulate Primary ECU locally on a user's host; -. *aktualizr-secondary*, aka posix or IP secondary - a reference implementation intended for running on Secondary ECU and capable to communicate with Primary ECU aktualizr via TCP/IP protocol (implemenation relies on Posix API). Aktualizr-secondary can be used to emulate a Secondary ECU on a user's host. +. *aktualizr*, aka aktualizr-primary - a reference implementation of an application utilizing libaktualizr, its binary/executable is called aktualizr. Aktualizr can be used to emulate Primary ECU locally on a user's host; +. *aktualizr-secondary*, aka POSIX or IP Secondary - a reference implementation intended for running on Secondary ECU and capable of communicating with Primary ECU aktualizr via TCP/IP protocol (implementation relies on POSIX API). Aktualizr-secondary can be used to emulate a Secondary ECU on a user's host. == *Secondary* *Build* -The Secondary executable is built by default, thus following build instructions outlined in link:{aktualizr-github-url}/README.adoc[the repo's README] should do the trick. If the build procedure is completed successfully then aktualizr-secondary can be found in ``/src/aktualizr_secondary/aktualizr-secondary``. +The Secondary executable is built by default, thus following build instructions outlined in link:{aktualizr-github-url}/README.adoc[the repo's README] should do the trick. If the build procedure is completed successfully then aktualizr-secondary can be found in `/src/aktualizr_secondary/aktualizr-secondary`. 
*Configure* @@ -40,36 +40,32 @@ More details on the configuration in general and specific parameters can be foun *Run* -``/src/aktualizr_secondary/aktualizr-secondary -c /config/posix-secondary.toml`` +`/src/aktualizr_secondary/aktualizr-secondary -c /config/posix-secondary.toml` Once it's started, you should see a message saying that aktualizr-secondary is listening on the port specified in the config. == *Primary* *Build* -The Primary executable is built by default, thus following build instructions outlined in link:{aktualizr-github-url}/README.adoc[the repo's README] should do the trick. If the build procedure is completed successfully then aktualizr executable can be found in ``/src/aktualizr_primary/aktualizr``. +The Primary executable is built by default, thus following build instructions outlined in link:{aktualizr-github-url}/README.adoc[the repo's README] should do the trick. If the build procedure is completed successfully then aktualizr executable can be found in `/src/aktualizr_primary/aktualizr`. *Configure* A default configuration of aktualizr acting as Primary can be found in link:{aktualizr-github-url}/config/sota-local-with-secondaries.toml[/config/sota-local-with-secondaries.toml]. -One configuration parameter that is worth mentioning is `[uptane]: secondary_config_file` which specifies a path -to secondary(ies) configuration file containing input parameters for aktualizr/Primary to communicate with any secondaries. -This is a link:{aktualizr-github-url}/config/posix-secondary-config.json[default secondary configuration for Primary]. -In order to use the given default config file, either copy it into your current working directory or update -`[uptane]: secondary_config_file` value so it defines a full path to the secondary config file (e.g. ``/config/posix-secondary-config.json``). 
+Of particular importance is the configuration parameter `[uptane]: secondary_config_file`, which specifies a path to a configuration file containing input parameters for the Primary (e.g. aktualizr) to communicate with any Secondaries. See for example link:{aktualizr-github-url}/config/posix-secondary-config.json[the default Secondary configuration for the Primary]. To use this config file, either copy it to your current working directory or set the `[uptane]: secondary_config_file` value to the full path of the Secondary config file (e.g. `/config/posix-secondary-config.json`). -Configuration parameters of secondaries for Primary/aktualizr that are worth mentioning are: +The available configuration parameters of Secondaries for the Primary are: * `secondaries_wait_port` - TCP port aktualizr listen on for connections from Secondaries -* `secondaries_wait_timeout` - timeout (in sec) of waiting for connections from Secondaries. Primary/aktualizr waits for a connection from those secondaries that it failed to connect to at the startup time. -* `secondaries` - a list of Secondary TCP/IP addresses +* `secondaries_wait_timeout` - timeout (in sec) of waiting for connections from Secondaries. Primary/aktualizr waits for a connection from those Secondaries that it failed to connect to at the startup time. +* `secondaries` - a list of TCP/IP addresses and the associated metadata verification type of each Secondary. Put your credential.zip file into the current working directory or update `[provision] provision_path` in link:{aktualizr-github-url}/config/sota-local-with-secondaries.toml[the config] so it specifies a full path to your credential file. *Run* -``/src/aktualizr_primary/aktualizr -c /config/sota-local-with-secondaries.toml`` +`/src/aktualizr_primary/aktualizr -c /config/sota-local-with-secondaries.toml` Once aktualizr is running, check the output for *_Adding Secondary to Aktualizr_* and *_Provisioned successfully on Device Gateway_*. 
You should then see that a new device has been registered on the server and that it includes two ECUs: your local Primary and Secondary. You can now send updates to either ECU. == *Tips and Tricks* @@ -82,35 +78,30 @@ echo 'export AKT_PROJ_HOME=""' >> ~/.profile .... echo 'export AKT_BUILD_DIR="$AKT_PROJ_HOME/build"' >> ~/.profile .... -* Use ``$AKT_PROJ_HOME/config/sota-local-with-secondaries.toml`` as the config for your emulated Primary ECU -* Copy ``$AKT_PROJ_HOME/config/posix-secondary-config.json`` to the directory you run your Primary from, let's call it `PRIMARY_HOME_DIR` further. -* Copy your credential file ``credentials.zip`` to `PRIMARY_HOME_DIR` -* Use ``$AKT_PROJ_HOME/config/posix-secondary.toml`` as the config for your emulated Secondary ECU -* To run the primary launch the following from `PRIMARY_HOME_DIR` +* Use `$AKT_PROJ_HOME/config/sota-local-with-secondaries.toml` as the config for your emulated Primary ECU +* Copy `$AKT_PROJ_HOME/config/posix-secondary-config.json` to the directory you run your Primary from (`PRIMARY_HOME_DIR`) +* Copy your credential file `credentials.zip` to `PRIMARY_HOME_DIR` +* Use `$AKT_PROJ_HOME/config/posix-secondary.toml` as the config for your emulated Secondary ECU +* To run the Primary launch the following from `PRIMARY_HOME_DIR` .... $AKT_PRIMARY -c $AKT_PROJ_HOME/config/sota-local-with-secondaries.toml .... -* To run the secondary launch the following from `PRIMARY_HOME_DIR` (although it can be executed from any directory) +* To run the Secondary launch the following from `PRIMARY_HOME_DIR` (although it can be executed from any directory) .... $AKT_SECNDR -c $AKT_PROJ_HOME/config/posix-secondary.toml .... 
* Add --loglevel 0 to the aforementioned launch commands if you would like to see more logs -* To re-register your emulated multi-ECU device (or start playing it from scratch) remove ``storage`` directory from `PRIMARY_HOME_DIR` +* To re-register your emulated multi-ECU device (or start playing it from scratch) remove `storage` directory from `PRIMARY_HOME_DIR` -=== Multiple secondaries +=== Multiple Secondaries -In order to emulate a device containing one primary ECU along with more than one secondary ECUs the following should be done. +In order to emulate a device containing one Primary ECU along with more than one Secondary ECUs the following should be done. -* Run the secondary executable `/src/aktualizr_secondary/aktualizr-secondary` desired number of times each from different directories. -Prior to running secondary executables, please, +* Run the Secondary executable `/src/aktualizr_secondary/aktualizr-secondary` desired number of times each from different directories. +Prior to running Secondary executables, please, - ** copy the configuration file /config/posix-secondary.toml to each directory the secondary will be launched from. - Let's call such directory as `SECONDARY_HOME_DIR`; + ** copy the configuration file `/config/posix-secondary.toml` to each directory the Secondary will be launched from (`SECONDARY_HOME_DIR`); - ** update a value of `[network]:port` parameter of the config file in each `SECONDARY_HOME_DIR` directory in such way - that each secondary config specifies different port number (9050 by default) hence each secondary will listen on different port; + ** update the value of `[network]:port` parameter of the config file in each `SECONDARY_HOME_DIR` directory such that each Secondary config specifies different port number (9050 by default) and thus each Secondary will listen on different port; -* Update posix-secondary-config.json located in `PRIMARY_HOME_DIR` (see instructions in p. 
`Primary`) with details of each secondary - that was executed in previous step, specifically, add corresponding values to "secondaries" list field (e.g. `"secondaries": [{"addr": "127.0.0.1:9050"}, {"addr": "127.0.0.1:9051"}]`). - Once posix-secondary-config.json is updated run the primary, as result you should see that it is connected with multiple secondaries - in aktualizr logs as well as on UI. +* Update `posix-secondary-config.json` located in `PRIMARY_HOME_DIR` (see instructions above in the `Primary` section) with details of each Secondary that was executed in previous step. Specifically, add corresponding values to `secondaries` list field (e.g. `"secondaries": [{"addr": "127.0.0.1:9050", "verification_type": "Full"}, {"addr": "127.0.0.1:9051", "verification_type": "Full"}]`). Once `posix-secondary-config.json` is updated, run the Primary. You should see that it is connected with multiple Secondaries in the aktualizr logs as well as in the web UI. diff --git a/docs/ota-client-guide/modules/ROOT/pages/provide-root-cert.adoc b/docs/ota-client-guide/modules/ROOT/pages/provide-root-cert.adoc index c95cf6d34e..1f2cb7cf2b 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/provide-root-cert.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/provide-root-cert.adoc @@ -10,14 +10,13 @@ endif::[] //MC: This is a copy of the topic "provide-testroot-cert.adoc" but intended for the "prod" use case. Need to use more includes to reduce redundancy -Once you are ready to move to production, you need to have your final fleet root certificate registered with your production account. Ideally, your fleet root certificate should come from an external certificate authority (CA) who can take care of safeguarding your private key. +Once you are ready to move to production, you need to have your final Fleet Root certificate registered with your production account. 
-During testing, you might have followed our procedure to generate a self-signed root certificate and had it registered with your test account. We don't recommend using a self-signed certificate for production because it makes you more vulnerable to security breaches. Nevertheless, if you'd prefer to stick with a self-signed certificate, you'll need to generate another one and have it registered with your production account. -* To register your fleet root certificate with HERE OTA Connect, send it to link:mailto:otaconnect.support@here.com[otaconnect.support@here.com]. +* To register your Fleet Root certificate with HERE OTA Connect, send it to link:mailto:otaconnect.support@here.com[otaconnect.support@here.com]. If you followed our recommendations, you should have one OTA Connect account for testing and another for production. Make sure that you specify that this certificate is for your *production* account. * While you wait for confirmation from the OTA Connect support team, you can already set up your xref:generate-devicecert.adoc[device certificate generation]. -* Once you've received confirmation that the fleet root certificate has been registered, you can xref:enable-device-cred-provisioning.adoc[enable device-credential provisioning and install the certificates] on your devices. \ No newline at end of file +* Once you've received confirmation that the Fleet Root certificate has been registered, you can xref:enable-device-cred-provisioning.adoc[enable device-credential provisioning and install the certificates] on your devices. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/provide-testroot-cert.adoc b/docs/ota-client-guide/modules/ROOT/pages/provide-testroot-cert.adoc deleted file mode 100644 index 430888ee9d..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/provide-testroot-cert.adoc +++ /dev/null @@ -1,21 +0,0 @@ -= Register your test root certificate -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -//MC: This is a copy of the topic "provide-root-cert.adoc" but intended for the "test" use case. Need to use more includes to reduce redundancy - -Once you have a root certificate, you need to have it registered with your account so that the OTA Connect server can verify your device certificates. - -* To register your test root certificate with HERE OTA Connect, send it to link:mailto:otaconnect.support@here.com[otaconnect.support@here.com]. - -If you followed our recommendations, you should have one OTA Connect account for testing and another for production. Make sure that you specify that this certificate is for your *test* account. - -* While you wait for confirmation from the OTA Connect support team, you can already xref:generatetest-devicecert.adoc[generate a test device certificate]. - -* Once you've received confirmation that the root certificate has been registered, you can xref:enable-device-cred-provtest.adoc[enable device-credential provisioning and install the certificate] on a test device. 
\ No newline at end of file diff --git a/docs/ota-client-guide/modules/ROOT/pages/push-images-with-bitbake.adoc b/docs/ota-client-guide/modules/ROOT/pages/push-images-with-bitbake.adoc new file mode 100644 index 0000000000..055623801d --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/push-images-with-bitbake.adoc @@ -0,0 +1,17 @@ +== Upload Yocto images using offline credentials + +After you rotate your software signing keys, you can no longer use credentials downloaded from the OTA Connect portal to upload software images. + +*To upload software images using offline credentials:* + +. Export your new offline Targets key into a new .zip file (for example, offline-credentials.zip). ++ +---- +garage-sign export-credentials --repo myimagerepo --key-name mytargets --output offline-credentials.zip +---- + +. Update your `local.conf` to use the new `offline-credentials.zip` file and run `bitbake` as before. + +As part of the `bitbake` process, the image metadata inside `targets.json` is signed with your offline TUF keys. The signed `targets.json` file is then uploaded to your OTA Connect account. + +To learn more about the BitBake commands and options, see its xref:useful-bitbake-commands.adoc[reference] documentation. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/pushing-updates.adoc b/docs/ota-client-guide/modules/ROOT/pages/pushing-updates.adoc index e0407e3508..2ac9ae7966 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/pushing-updates.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/pushing-updates.adoc @@ -1,4 +1,5 @@ -= Upload a sample software version += Add software to your Yocto image +:page-partial: ifdef::env-github[] [NOTE] @@ -7,12 +8,7 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: -:page-layout: page -:page-categories: [quickstarts] -:page-date: 2017-05-23 16:31:35 -:page-order: 6 -:icons: font +include::partial$aktualizr-version.adoc[] Every time you bitbake a new image, it is automatically pushed to {product-name}. You can then send the updated image out to any of your devices. In this guide, you learn a few ways to push updated system images from your build machine or workstation to {product-name-short}. @@ -82,8 +78,8 @@ If you are building for 32-bit ARM (i.e. Raspberry Pi), you'll need some support ---- sudo dpkg --add-architecture i386 -sudo apt-get update -sudo apt-get install g++-multilib libssl-dev:i386 libcrypto++-dev:i386 zlib1g-dev:i386 +sudo apt update +sudo apt install g++-multilib libssl-dev:i386 libcrypto++-dev:i386 zlib1g-dev:i386 ---- For more details, see the https://github.com/imyller/meta-nodejs#cross-compiling-for-32-bit-target-on-64-bit-host[meta-nodejs README]. @@ -108,7 +104,7 @@ The screen sessions will be named `pianobar` and `patiobar` respectively; use `s == Make your own recipe or layer -A complete guide to writing Yocto recipes is out of scope here, but http://www.yoctoproject.org/docs/2.6/dev-manual/dev-manual.html#new-recipe-writing-a-new-recipe[the Yocto Reference Manual] is a great resource. 
You can also take a look at the recipes in https://github.com/advancedtelematic/meta-jukebox[`meta-jukebox`] to use as examples: they're all fairly simple, and there are examples of four different types of recipe: +A complete guide to writing Yocto recipes is out of scope here, but http://www.yoctoproject.org/docs/{yocto-version}/dev-manual/dev-manual.html#new-recipe-writing-a-new-recipe[the Yocto Reference Manual] is a great resource. You can also take a look at the recipes in https://github.com/advancedtelematic/meta-jukebox[`meta-jukebox`] to use as examples: they're all fairly simple, and there are examples of four different types of recipe: * https://github.com/advancedtelematic/meta-jukebox/tree/master/recipes-multimedia/pianobar[Pianobar] itself is a fully manual recipe, though it's a pretty simple one; it has specific instructions for the compile and install steps, though they're essentially just `make` and `make install` with a couple of config options. * https://github.com/advancedtelematic/meta-jukebox/tree/master/recipes-multimedia/libao[libao] and https://github.com/advancedtelematic/meta-jukebox/tree/master/recipes-multimedia/faad2[faad2] make use of https://en.wikipedia.org/wiki/GNU_Build_System[Autotools] to build. They include the line `inherit autotools` in the recipe, which automatically generates configure, compile, and install instructions based on Autotools. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/recommended-clientconfig.adoc b/docs/ota-client-guide/modules/ROOT/pages/recommended-clientconfig.adoc index ee9fee2fea..232a242517 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/recommended-clientconfig.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/recommended-clientconfig.adoc @@ -7,7 +7,6 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] - Before you start developing or deploying to production, you should check that your configuration file has appropriate settings for your use case. The configuration file and exact parameters depend on how you use OTA Connect: @@ -36,6 +35,14 @@ include::partial$config-descriptions.adoc[tags=pollconfig-dev] `IMAGE_INSTALL_append = " aktualizr-auto-reboot "` | include::partial$config-descriptions.adoc[tags=autorebootconfig-dev] +| +`IMAGE_INSTALL_append += " systemd-journald-persistent"` +| +To troubleshoot problems that you might encounter during development, we recommend that you enable persistent `systemd` logging. This setting is enabled by default for newly configured environments (see link:https://github.com/advancedtelematic/meta-updater/tree/master/conf/local.conf.systemd.append[local.conf.systemd.append]). +| +`IMAGE_INSTALL_append += " aktualizr-log-debug"` +| +We also recommend running with debug logging enabled in aktualizr during development. |==================== @@ -53,6 +60,15 @@ include::partial$config-descriptions.adoc[tags=pollconfig-prod] Remove `aktualizr-auto-reboot` from the `IMAGE_INSTALL_append` parameter. | include::partial$config-descriptions.adoc[tags=autorebootconfig-prod] +| +Remove `systemd-journald-persistent` from the `IMAGE_INSTALL_append` parameter. +| +If you followed our recommendation to enable log persistence for development, you may want to disable it for production to save space on your device. +| +Remove `aktualizr-log-debug` from the `IMAGE_INSTALL_append` parameter. 
+| +If you followed our recommendation to enable debug-level logging for development, you may want to disable it for production to reduce file writes. + |==================== diff --git a/docs/ota-client-guide/modules/ROOT/pages/release-process.adoc b/docs/ota-client-guide/modules/ROOT/pages/release-process.adoc index 0083424ac5..3012692a0f 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/release-process.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/release-process.adoc @@ -1,6 +1,8 @@ = Release process :toc: macro :toc-title: +:sectnums: +:aktualizr-github-url: https://github.com/advancedtelematic/aktualizr/tree/master ifdef::env-github[] @@ -10,10 +12,11 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] - To create a new link:https://github.com/advancedtelematic/aktualizr/releases[release of aktualizr and garage-deploy], there are several discrete steps to follow: +ifdef::env-github[] toc::[] +endif::[] == Update the changelog and other docs @@ -22,10 +25,21 @@ This is normally a good time to update the link:https://github.com/advancedtelem There are a few additional documents that should be updated to refer to the new release: * The link:https://github.com/advancedtelematic/aktualizr/blob/master/docs/README.adoc#reference-documentation[docs README] contains a table with links to the doxygen docs for each release. -* The xref:install-garage-sign-deploy.adoc[garage-deploy] installation guide contains a reference to the latest release of the garage-deploy Debian package. +* Several pages contain references to the latest release of aktualizr and/or garage-deploy via a variable set in a https://github.com/advancedtelematic/aktualizr/blob/master/docs/ota-client-guide/modules/ROOT/pages/_partials/aktualizr-version.adoc[doc snippet] created for this purpose. This is also a good time to review the docs in general and to consider whether any changes in this release might have affected the docs. 
+== Pull in any new changes from the current docs branch + +The docs published as https://docs.ota.here.com/ota-client/latest/index.html[latest] in the OTA Connect Developer Guide are built from the most recent release's docs branch (`\{version}-docs`). There will very likely be changes from there that have not been pulled into master yet. Open up a PR to merge the previous release's docs into master, resolving any merge conflicts as needed. Once that PR is merged, you can move on to the next step. + +The cleanest way to do this (especially if there were multiple changes to the docs branch) is to merge the docs branch locally and then rebase on master to remove the merge commits: + +---- +git merge origin/2020.2-docs +git rebase origin/master +---- + == Create a new tag Releases are built automatically by gitlab from annotated tags of the form `major.minor`, where `major` and `minor` are numbers. We normally set `major` to the current year and `minor` to an incrementing number beginning at 1. @@ -39,51 +53,95 @@ git push github Gitlab will build this tag and automatically create a release for it on github. -== Update doxygen on github +=== Updating an already-released tag -To update the doxygen documentation for master on github, you will need to do something like the following: +Generally, you should not update a tag after it has already been pushed to github. This is asking for confusion and problems down the line. However, if something goes wrong in creating the release from the tag, you may need to do this anyway. Note that you will need to manually delete the release on github if it has been created already. (Otherwise, the release's files will not get updated.) You will then need to either retry the specific github-release step or re-trigger the entire pipeline (which can be done by re-pushing the tag to github). -1. In an aktualizr repo, run `make doxygen` (or `make docs`) in the build directory. -1. 
Clone a second aktualizr repo and run `git checkout gh-pages`. -1. In the second repo, run `git rm search/* *.css *.html *.js *.png *.map *.md5 *.png *.svg`. -1. Copy the contents of `/docs/doxygen/html` into the root of the second repo. (Something like `cp -a /docs/doxygen/html/* `.) -1. In the second repo, run `git add .`, `git commit -as`, and `git push`. -1. Wait a minute or two for github to refresh and render the files. +== Create a new docs branch -== Add doxygen pages for the new release on github +Create (and push to github) a new branch with the commit you just tagged as the parent: -To add doxygen docs for a new tag, you will need to do something like the following: +---- +git checkout -b -docs # e.g. git checkout -b 2019.63-docs +git push github --set-upstream -docs +---- -1. Check out the tag or commit you wish to add (`git checkout 2018.63`, for example). -1. Clean out the build directory (to remove stale objects), then run CMake and doxygen again: +=== Update version strings in antora.yml for old and new branch + +The versioning of the docs is controlled by the antora.yml file located at `docs/ota-client-guide/antora.yml`. The latest version should have its version set to *latest*, and its display_version set to * (latest)*. That will initially make it conflict with the version in the previous docs branch; you can't have two different branches with the same version set. To resolve this, fix the version number in `antora.yml` on the previous branch--from *latest* to its actual version number. + +So, if you're releasing version 2020.4, your `antora.yml` on the two most recent branches should look like this: + +[{tabs}] +==== +2020.4-docs:: + +-- ---- -rm -rf build/* -cd build -cmake .. -make doxygen +name: ota-client +title: OTA Connect Developer Guide +version: latest +display_version: 2020.4 (latest) +nav: +- modules/ROOT/nav.adoc ---- +-- + +2020.3-docs:: + -1. Clone a second aktualizr repo and run `git checkout gh-pages`. -1. 
In the second repo, make a directory for the tag or commit you wish to add, i.e. `mkdir 2018.63`. -1. Copy the contents of `/docs/doxygen/html` into the directory you just created. (Something like `cp -a /docs/doxygen/html/* /2018.63`.) -1. In the second repo, run `git add 2018.63`, `git commit -as`, and `git push`. -1. Wait a minute or two for github to refresh and render the files. +-- +---- +name: ota-client +title: OTA Connect Developer Guide +version: '2020.3' +display_version: '2020.3' +nav: +- modules/ROOT/nav.adoc +---- +-- +==== + + +== Update doxygen on github + +You will need to update the link:https://advancedtelematic.github.io/aktualizr/index.html[doxygen documentation] both for the new release and master. + +This step is now done automatically through a GitLab pipeline but you can refer to the link:{aktualizr-github-url}/scripts/publish_github_docs.sh[automation script] for the exact steps, in case it needs to be done manually. + +The pages should be updated a few minutes after a successful release pipeline execution. == Update the description of the github release Once the release is ready on github, it should be edited to include a link to the changelog and doxygen documentation for that particular release. You can use a previous release as a model of how to format these links. -== Update the Uptane fork of aktualizr +== Update the homebrew recipe for aktualizr -Uptane has a fork of aktualizr in link:https://github.com/uptane/aktualizr[their namespace]. It should be updated with the same version of aktualizr used to make the new release. +The https://github.com/advancedtelematic/homebrew-otaconnect/blob/master/aktualizr.rb[homebrew aktualizr formula] should be updated with the new release. -== Update the homebrew recipe for aktualizr +There is the gitlab CI job that will automatically update the formula, build a bottle, and upload the bottle to the github release page when a new release is created. 
+ +Also, this job will make a pull request to update the formula in the tap. The pull request should be listed https://github.com/advancedtelematic/homebrew-otaconnect/pulls/[here]. You should merge it into the master branch of the https://github.com/advancedtelematic/homebrew-otaconnect/[homebrew-otaconnect repo], so Mac users can install the latest aktualizr release. -The https://github.com/advancedtelematic/homebrew-otaconnect/blob/master/aktualizr.rb[homebrew aktualizr recipe] should be updated with the new release. You'll need a mac, with homebrew installed, to do this. +=== Fallback: manually build a bottle and update the homebrew recipe -. Edit the recipe on your local system with `brew edit aktualizr`, and replace the old version tag with the new one. -. Build it with `brew reinstall --build-from-source --build-bottle aktualizr`. This will create a bottle file named `+aktualizr--VERSION.mojave.bottle.tar.gz+`, and output a block of Ruby code that looks something like this: +If the CI job failed for some reason, you might need to manually create the new bottle and edit the recipe. Here are the steps to follow (you'll need a Mac with homebrew installed): + +. Clone a repo/tap that contains the aktualizr recipe unless it has already been done ++ +---- +brew tap advancedtelematic/otaconnect +---- +. Edit the recipe on your local system with `brew edit aktualizr`, and replace the old version and revision with the new one. +. 
Build it, and then bottle it: ++ +---- +brew uninstall -f aktualizr +brew install -v --build-bottle aktualizr +brew bottle --json --no-rebuild --force-core-tap --root-url=${RELEASE_BASE_URL}/${VERSION} aktualizr +where RELEASE_BASE_URL=https://github.com/advancedtelematic/aktualizr/releases/download and VERSION is the new version tag +---- ++ +This will create a bottle file named `+aktualizr--VERSION.mojave.bottle.tar.gz+`, and output a block of Ruby code that looks something like this: + ---- bottle do @@ -91,12 +149,26 @@ The https://github.com/advancedtelematic/homebrew-otaconnect/blob/master/aktuali sha256 "391bc242685d86fd4fc69d90d98e10a464e6feebca943d3f48f848615c898085" => :mojave end ---- -. Rename the file, removing one of the dashes (i.e. `+mv aktualizr--2019.6.mojave.bottle.tar.gz aktualizr-2019.6.mojave.bottle.tar.gz+`). I don't know why the generated filename is always wrong, but it is. +. Update the recipe with the new bottle block ++ +---- +brew bottle --merge --write --no-commit ./aktualizr--${VERSION}.mojave.bottle.json +---- +. Rename the file, removing one of the dashes. I don't know why the generated filename is always wrong, but it is. ++ +---- +mv aktualizr--${VERSION}.mojave.bottle.tar.gz aktualizr-${VERSION}.mojave.bottle.tar.gz +---- . Add the renamed bottle file as an artifact to the release on the https://github.com/advancedtelematic/aktualizr/releases[aktualizr releases page]. -. Replace the `bottle do` block in your local recipe with the generated block from step 2, and add the appropriate `root_url` directive. . Test the recipe locally, including installing from the bottle: `brew reinstall --force-bottle aktualizr`. . Open a PR on the https://github.com/advancedtelematic/homebrew-otaconnect[homebrew-otaconnect] repo to update the recipe with all your changes. -== Test the released Debian packages +== Verify the released Debian packages + +Newly created releases automatically trigger an OTF pipeline in gitlab. 
Currently, you still need to manually verify that the pipeline actually succeeded. + +== Update meta-updater + +The version of aktualizr used by link:https://github.com/advancedtelematic/meta-updater/[meta-updater] should be updated to match the new release. First, open a PR against dunfell that updates aktualizr to the same commit used in the newly released tag. (Historically, we started with master, but we now skip straight to dunfell.) This is also a good time to update the aktualizr recipe to pull the latest version of link:https://ats-tuf-cli-releases.s3-eu-central-1.amazonaws.com/index.html[garage-sign]. -Don't forget to test the resulting Debian packages manually! +Once that PR has passed oe-selftest, successfully passed review, and gotten merged, you should then backport that change, along with anything else relevant since the last backport was done, to the other xref:yocto-release-branches.adoc[currently supported release branches]. Note that while master is allowed to use an arbitrary recent version of aktualizr, the release branches should only use released versions of aktualizr. diff --git a/docs/ota-client-guide/modules/ROOT/pages/remove-sw-version.adoc b/docs/ota-client-guide/modules/ROOT/pages/remove-sw-version.adoc new file mode 100644 index 0000000000..a84af0eaf1 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/remove-sw-version.adoc @@ -0,0 +1,38 @@ += Remove a software version +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +To remove a software version, you need to delete a target in the Targets metadata file. +include::garage-sign-reference.adoc[tags=target-term] + +Once you delete a target, you can no longer install it on devices. 
+ +*To delete a target:* + +include::upload-large-binary.adoc[tags=gs-initialize] + +include::upload-large-binary.adoc[tags=gs-pull-targets] + +. To delete the target, depending on the type of image, specify the target name and version in one of the following forms: + +** For OSTree images, `_` +** For binary images, `-` ++ +[source,bash] +---- +garage-sign targets delete \ + --filename - +---- + +include::upload-large-binary.adoc[tags=gs-sign-targets] + +include::upload-large-binary.adoc[tags=gs-push-targets] + +You can no longer see the deleted software version in your portal software repository. + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/reporting-problems.adoc b/docs/ota-client-guide/modules/ROOT/pages/reporting-problems.adoc new file mode 100644 index 0000000000..6356c64ebe --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/reporting-problems.adoc @@ -0,0 +1,59 @@ += Reporting problems +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +:page-layout: page +:page-categories: [tips] +:icons: font + +== Contacting HERE OTA Connect support + +If you encounter a problem with {product-name} and the xref:troubleshooting.adoc[Troubleshooting article] doesn't address it, please contact link:mailto:otaconnect.support@here.com[otaconnect.support@here.com] and we'll do our best to help! To help us diagnose your issue, please provide us with as much information as possible. What follows are some useful artifacts as well as how to retrieve them. + +=== aktualizr logs with debug logging + +By default, aktualizr writes logs to the systemd logger, which you can read with `journalctl -u aktualizr`. 
If you run aktualizr manually on the commandline, the logging is printed on stdout. The default loglevel is 2 (info). Lower loglevels are more verbose. To change the loglevel, there are two options: + +* To build a new image with debug logging enabled, add `IMAGE_INSTALL_append = " aktualizr-log-debug "` to your `local.conf` and bitbake a new image. See the xref:meta-updater-usage.adoc#_aktualizr_configuration[meta-updater usage article] for more details. +* To enable debug logging on a device accessible via the commandline without building a new image, stop the aktualizr service on the device (`systemctl stop aktualizr`) and manually start aktualizr on the commandline, e.g. `aktualizr --loglevel 0`. + +See also the xref:aktualizr-config-options.adoc#_logger[aktualizr configuration options article]. + +=== Output of aktualizr-info + +See the xref:debugging-tips.adoc#_inspect_stored_info_with_aktualizr_info[debugging tips] for more information. + +=== aktualizr version + +On an active device accessible via the commandline, enter `aktualizr --version`. Otherwise, you can examine the aktualizr recipe in meta-updater in your build environment and look for the `SRCREV` variable. This recipe will normally be located (relative to your Yocto build directory) at `../meta-updater/recipes-sota/aktualizr/aktualizr_git.bb`. + +=== aktualizr configuration + +If you are able to run aktualizr with loglevel 0, the final configuration will be printed to the log when aktualizr starts. See the xref:aktualizr-config-options.adoc#_logger[aktualizr configuration options article] for more information. + +If you are having problems with Secondaries, please also send us the Secondary JSON configuration file for the Primary (`secondary_config_file`) described in xref:aktualizr-config-options.adoc#_uptane[the same article]. 
+ +=== Yocto release branch and layers used to build the image + +If you have the link:https://source.android.com/setup/build/downloading[Android repo] tool available and initialized your build environment with it (such as via the link:https://github.com/advancedtelematic/updater-repo/[updater-repo]), you can run `repo manifest -r` to print out all of the layers you are using and the revisions that are checked out. + +If Android repo is not available or was not used, please send us your `bblayers.conf` file, which normally can be found in the `conf` subdirectory of your bitbake build directory. Please also let us know which Yocto release branch you are using. + +See the xref:yocto-release-branches.adoc[Yocto release branches article] for more information. + +=== Yocto configuration + +For problems with bitbaking, please send us your `local.conf` file, which normally can be found in the `conf` subdirectory of your bitbake build directory. + +=== aktualizr-secondary logs + +If you are using aktualizr-secondary and having a problem with it, its logs may be helpful as well. aktualizr-secondary logs in the same way as aktualizr on the Primary, except for the name of the application. + +=== aktualizr-secondary configuration + +The aktualizr-secondary configuration may be helpful if applicable. aktualizr-secondary is configured similarly to aktualizr on the Primary, and it also prints the entire final configuration to the log if loglevel 0 is used. diff --git a/docs/ota-client-guide/modules/ROOT/pages/rollback.adoc b/docs/ota-client-guide/modules/ROOT/pages/rollback.adoc index a0d37ce3ef..ccfea2dbba 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/rollback.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/rollback.adoc @@ -19,7 +19,7 @@ Currently only U-boot 'bootcount' rollback mechanism is supported. == U-boot -More info about bootcount feature you can find in U-boot https://www.denx.de/wiki/DULG/UBootBootCountLimit[documentation]. 
+More info about bootcount feature you can find in U-boot https://www.denx.de/wiki/Knowhow/DULG/UBootBootCountLimit[documentation]. When using 'bootcount' the system can be in one of three states: diff --git a/docs/ota-client-guide/modules/ROOT/pages/rotating-signing-keys.adoc b/docs/ota-client-guide/modules/ROOT/pages/rotating-signing-keys.adoc index 3a8420cd58..7ef1226a12 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/rotating-signing-keys.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/rotating-signing-keys.adoc @@ -1,4 +1,4 @@ -= Manage keys for software metadata += Rotate keys for Root and Targets metadata ifdef::env-github[] [NOTE] @@ -7,103 +7,59 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +Before you start, make sure that you have installed the https://github.com/advancedtelematic/ota-tuf/tree/master/cli[garage-sign] tool. If you are on a Debian-based Linux distro, you can also install the `garage-sign` tool with the xref:install-garage-sign-deploy.adoc[garage-deploy] tool as they are packaged together. -OTA Connect has a security concept that includes signing metadata files with secure, offline keys. For more information about these files, see the xref:uptane.adoc#_uptane_metadata[Uptane metadata overview]. +IMPORTANT: After you rotate keys, you will not be able to upload software images via the OTA Connect web UI. Use xref:push-images-with-bitbake.adoc[BitBake] or xref:upload-large-binary.adoc[garage-sign] instead. -== How metadata is signed by default - -As part of the quickstart, the OTA Connect server automatically creates two initial keys as part of the account setup process. These keys are stored on the OTA Connect server and are used to automatically sign the two software metadata files. You don't have to think about this metadata at all. 
- -== How metadata should be signed in production - -Before using OTA Connect in production, however, you should create offline keys that you manage yourself, then rotate out the default keys that were automatically created for your account on the OTA Connect server. If you don't do this, you expose yourself to risks that we describe the in the xref:pki.adoc[key management] topic. - -Instead of being signed on the server, these metadata files will now be signed locally or on your build machine. The signing happens automatically whenever you push a new disk image to OTA Connect. However, you need to update your build cofiguration first. The following procedures show you how to do this. - -Before you start, make sure that you've installed the xref:install-garage-sign-deploy.adoc[`garage-deploy`] tool first. This tool includes the `garage-sign` utility, which you'll need for this procedure. - -== Rotate the keys for `root` and `targets` metadata - -=== Create a local image repository - -A image repository is just a directory structure containing signed metadata in JSON format. Create a new one called _myimagerepo_ with `garage-sign`: +*To rotate the keys:* +. Create a local image repository. ++ ---- garage-sign init --repo myimagerepo --credentials /path/to/credentials.zip ---- - -This command creates a `./tuf/myimagerepo/` directory tree in the current directory. ++ +A `./tuf/myimagerepo/` directory tree is created in the current directory. This directory should be secured on an encrypted filesystem. -=== Generate new keys - -There are two metadata files in the repo and each file needs a new key to sign it. - +. Generate new Root and Targets keys. ++ ---- garage-sign key generate --repo myimagerepo --name myroot --type rsa garage-sign key generate --repo myimagerepo --name mytargets --type rsa ---- ++ +IMPORTANT: Keep these keys offline on secure hardware and do not lose them. 
If you lose the root key for an environment, it will no longer be possible to update software on any devices connected to that environment. Once you rotate your keys offline, you are responsible for keeping them safe. HERE has no ability to recover them for you. -**** -IMPORTANT: It is critical to keep these keys offline on secure hardware. *Do not lose these keys.* -**** - -=== Rotate the online keys with your new offline keys - -This is a four-step process: - -. Pull the current `targets.json` from OTA Connect: +. Pull the current `targets.json` file from OTA Connect. + ---- garage-sign targets pull --repo myimagerepo ---- -. Perform a complete root key rotation: + +. Rotate your online Root key with the new offline key that you created in step 2. + ---- garage-sign move-offline --repo myimagerepo --old-root-alias origroot \ --new-root myroot --new-targets mytargets ---- + -This command -+ -* removes the original `root` key from OTA Connect, -* generates a new `root.json` with the keys generated in the previous step (`myroot` and `mytargets`), -* signs the new `root.json` with both the old and new `root` keys, and -* uploads the newly signed `root.json` to OTA Connect +A new `root.json` file is generated, signed, and uploaded to OTA Connect. + +. Sign the current `targets.json` file with the new Targets key. + -. Sign the current `targets.json` with the new `targets` key: +TIP: This metadata expires after a predefined period. If you want to change the metadata expiry period, add the `--expires` or `--expire-after` option. For more information, see our guide to xref:metadata-expiry.adoc[managing metadata expiry dates]. + ---- garage-sign targets sign --repo myimagerepo --key-name mytargets ---- -+ -[TIP] -==== -This metadata eventually expires after a predefined period. If you'd like to define your own metadata expiry period, you can add the `--expires` or `--expire-after` option. 
For more information about these options, see our guide to xref:metadata-expiry.adoc[managing metadata expiry dates]. -==== -+ -. Upload the newly signed `targets.json` to OTA Connect: + +. Upload the newly signed `targets.json` to OTA Connect. + ---- garage-sign targets push --repo myimagerepo ---- -You have now successfully taken the keys for software metadata offline. - -[IMPORTANT] -==== -After rotating keys, you will no longer be able to upload packages through the OTA Connect web UI--only the usual way, through bitbake. -==== - -== Push new images with bitbake - -Export the new offline `targets` into a new credentials file that you can use with `bitbake`: - ----- -garage-sign export-credentials --repo myimagerepo --target-key-name mytargets --to offline-credentials.zip ----- - -Update your `local.conf` to use the new `offline-credentials.zip` file and run `bitbake` as before. - -As part of the `bitbake` process, the image's metadata inside `targets.json` is signed with your offline TUF keys. The signed `targets.json` is then uploaded to your OTA Connect account. +Your keys for software metadata are now offline. +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc b/docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc index 1c1d44fdad..e9e2a6c05e 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/schema-migrations.adoc @@ -14,14 +14,13 @@ endif::[] 1. Modify link:{aktualizr-github-url}/config/sql/schema.sql[] as you see fit 2. Write a config/sql/migration/migrate.n+1.sql that will convert an existing data to format in schema.sql. Note that old migrations must not be modified. - ++ Make sure that the migrate.n+1.sql file updates the 'version' table: - ++ DELETE FROM version; INSERT INTO version VALUES(...); - ++ 3. 
Write a config/sql/rollback/rollback.n+1.sql that will revert the new data format to the previous one. It should contain the opposite steps of migrate.n+1.sql if possible. If the reverse operation is lossy, it should at the minimum bring the device to a state where it can be updated. - ++ The 'version' table has to be updated as well, to contain n. - 4. If the migration manipulates existing data in a non-trivial way (anything that's not simply a new table creation, deletion, renaming), it is strongly advised to write an explicit migration test with realistic data in link:{aktualizr-github-url}/src/libaktualizr/storage/sqlstorage_test.cc[], similar to `DbMigration18to19`. diff --git a/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates-test.adoc b/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates-test.adoc deleted file mode 100644 index 4d38193000..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates-test.adoc +++ /dev/null @@ -1,19 +0,0 @@ -= Test software update security -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -//MC: This is a copy of the topic "secure-software-updates.adoc" but intended for the "test" use case. Need to use more includes to reduce redundancy. - -To secure your software updates, OTA Connect ensures that all software files have accompanying metadata that is signed according to the Uptane framework. - -When evaluating OTA Connect you don't have to worry about signing this metadata yourself. The OTA Connect server automatically signs the metadata after you upload software. - -However, for this process to work, the OTA Connect server must host the xref:pki.adoc[private keys] that are used to sign the metadata. 
This is a security risk -- if an attacker is able to infiltrate the OTA Connect server, they can use these private keys to sign metadata for malicious software and send it to your devices. - -To prevent an event like this from happening, you should take these private keys offline and sign the metadata in your development environment. Then you can push the signed metadata back to the server. To do this, you use the `garage-sign` command which is part of our xref:install-garage-sign-deploy.adoc[`garage-deploy`] tool. diff --git a/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates.adoc b/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates.adoc index 0a56192b54..8b33221fd0 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/secure-software-updates.adoc @@ -7,13 +7,16 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +OTA Connect has a security concept that includes signing metadata files with secure, offline keys. For more information about these files, see the xref:uptane.adoc#_uptane_metadata[Uptane metadata overview]. -//MC: This is a copy of the topic "secure-software-updates-test.adoc" but intended for the "prod" use case. Need to use more includes to reduce redundancy. +== How metadata is signed by default -To secure your software updates, OTA Connect ensures that all software files have accompanying metadata that is signed according to the Uptane framework. +When you create an account on the https://connect.ota.here.com[HERE OTA Connect Portal], all of your xref:uptane.adoc[Uptane] keys and metadata files are managed by OTA Connect. The keys are generated by our crypto service when the account is created and stored in a Vault instance. When you build software images and upload them to your account on the OTA Connect Portal, we generate the metadata and sign it for you. 
-When evaluating OTA Connect you don't have to worry about signing this metadata yourself. The OTA Connect server automatically signs the metadata after you upload software. +== How metadata should be signed in production -However, for this process to work, the OTA Connect server must host the xref:pki.adoc[private keys] that are used to sign the metadata. This is a security risk -- if an attacker is able to infiltrate the OTA Connect server, they can use these private keys to sign metadata for malicious software and send it to your devices. +You can xref:rotating-signing-keys.adoc[rotate your software signing key] and take it offline, replacing it with a key held only by you. As only you have the key in this scenario, the OTA Connect server can no longer sign software for you. The metadata files will now be signed locally or on your build machine. The signing happens automatically whenever you push a new software image to OTA Connect. However, you need to update your build configuration first. -To prevent an event like this from happening, you should take these private keys offline and sign the metadata in your development environment. Then you can push the signed metadata back to the server. To do this, you use the `garage-sign` command which is part of our xref:install-garage-sign-deploy.adoc[`garage-deploy`] tool. \ No newline at end of file +It is recommended for all production deployments to rotate the keys because a person who gained access to your OTA Connect account would be able to send arbitrary malicious software to your vehicles. If your software signing key is offline, the maximal impact of an account compromise would be to send an already-signed image--the one that used to be valid. + +Before you use OTA Connect in production, create offline keys that you manage yourself, and then rotate out the default keys that were automatically created for your account on the OTA Connect server. 
If you don't do this, you expose yourself to risks that we described in the xref:pki.adoc[key management] topic. To take your key offline, use the `garage-sign` command which is part of our xref:install-garage-sign-deploy.adoc[`garage-deploy`] tool. diff --git a/docs/ota-client-guide/modules/ROOT/pages/security.adoc b/docs/ota-client-guide/modules/ROOT/pages/security.adoc index ee7dc52454..4a94b84da8 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/security.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/security.adoc @@ -10,11 +10,11 @@ endif::[] OTA Connect provides you with two levels of security which have different tradeoffs. You can work with the *default* security measures which are much less work to manage but expose you to certain risks. Or, you can follow our recommended *production-level* security measures which are more work to set up but provide more robust security. -In both cases, communication between the devices and the OTA Connect is always secured via mutual TLS (X.509 certificates) for authorization, authentication, and encryption. +In both cases, communication between the devices and the OTA Connect is always secured via mutual TLS (X.509 certificates) for authorization, authentication, and encryption. The following sections describe the different security levels for device provisioning and software updates: -= Device provisioning +== Device provisioning When provisioning devices, you need to consider how to prevent unauthorized devices from provisioning. You also need to prevent your devices from being redirected to a malicious server. @@ -24,14 +24,14 @@ This is usually done with a PKI (Public Key Infrastructure). Devices are automat + By default, **the OTA Connect server acts as its own PKI** and manages device keys and certificates for you. + -All you have to do is download an initial provisioning key and bake it into the disk image that you'll flash to your devices. 
OTA Connect then takes care of issuing and validating device certificates. We call this type of provisioning xref:client-provisioning-methods.adoc[*shared-credential provisioning*] because you're using one provisioning key for your whole device fleet. +All you have to do is download an initial provisioning key and bake it into the disk image that you'll flash to your devices. OTA Connect then takes care of issuing and validating device certificates. We call this type of provisioning xref:client-provisioning-methods.adoc[*shared-credential provisioning*] because you're using one provisioning key for your whole device fleet. + {zwsp} * **Production-level security measure** + In short, you need to use your own PKI to allocate device certificates. You shouldn't leave this responsibility to the OTA Connect server. What makes things easy also makes things risky. If a malicious actor compromised your OTA Connect account, you would be in trouble because they could get your **private key** for signing device certificates. They could then provision whatever devices they wanted. -= Software updates +== Software updates Software updates are protected according to the xref:uptane.adoc[Uptane Framework] specifications. Uptane is one of the first comprehensive automotive OTA updating security frameworks available. This framework requires several metadata files to be signed by two separate keys. This metadata is also validated on the server. @@ -42,6 +42,6 @@ By default, the OTA Connect server acts as a PKI for these keys too. When you cr {zwsp} * **Production-level security measure** + -As with device provisioning, you should use your own PKI to sign metadata for your software files and updates. Take the two xref:pki.adoc[signing keys] offline and sign the metadata locally. +As with device provisioning, you should use your own PKI to sign metadata for your software files and updates. Take the two xref:pki.adoc[signing keys] offline and sign the metadata locally. 
+ Again, if a malicious actor compromised your OTA Connect account, they could get your **private keys** for signing software metadata. They could then upload infected software and push them out to your devices. This type of attack is infinitely more difficult if you keep your keys offline in a safe place. diff --git a/docs/ota-client-guide/modules/ROOT/pages/setup-boot-image-for-ostree.adoc b/docs/ota-client-guide/modules/ROOT/pages/setup-boot-image-for-ostree.adoc new file mode 100644 index 0000000000..fbdd7d1034 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/setup-boot-image-for-ostree.adoc @@ -0,0 +1,44 @@ += Set up boot image layout for OSTree compatibility +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +include::partial$aktualizr-version.adoc[] + +This is the second step in getting a new board running after xref:add-board-class.adoc[adding a new board class for the new target board in meta-updater]. + +This step involves getting Yocto to build a physical image with the partitions in the correct place, with the necessary modifications for OSTree. The standard approach here is to use https://www.yoctoproject.org/docs/{yocto-version}/dev-manual/dev-manual.html#creating-partitioned-images-using-wic[Wic, the OpenEmbedded Image Creator]. + +You will need to create a Wic Kickstart file that places the necessary firmware in memory where the board needs it to be. Typically, board vendors will provide a table with the memory layout of the board’s firmware. + +Once you complete this step, you can proceed to the final step of xref:add-meta-updater-to-vendors-sdk.adoc[adding meta-updater features to the vendor's SDK]. 
+ + +For example, using our NXP board example, the layout was provided in the https://www.nxp.com/docs/en/user-guide/LSDKUG_Rev19.06.pdf[Landscape SDK User Guide], Chapter 4.2, tables 14 and 16. + +.Table of NXP memory layout +image::img::screenshot_lsdk_memory_layout.png[width=100%] + +.Table of NXP default layout +image::img::screenshot_lsdk_default_layout.png[width=100%] + + +The information from these tables can be translated into the following WKS file: +[source,bash] +---- +part BL2 --source rawcopy2 --sourceparams="file=atf/bl2_sd.pbl" --ondisk mmcblk --no-table --align 4 +part BL3 --source rawcopy2 --sourceparams="file=atf/fip_uboot.bin" --ondisk mmcblk --no-table --align 1024 +part fman-ucode --source rawcopy2 --sourceparams="file=fsl_fman_ucode_ls1043_r1.1_108_4_9.bin" --ondisk mmcblk --no-table --align 9216 +part qe-ucode --source rawcopy2 --sourceparams="file=boot/fsl_qe_ucode_1021_10_A.bin" --ondisk mmcblk --no-table --align 9472 +part uboot-scr --source bootimg-partition --ondisk mmcblk --fstype=msdos --fixed-size=100M --align 65540 <1> +part / --source otaimage --ondisk mmcblk --fstype=ext4 --label root --align 167940 <2> + +bootloader --ptable msdos +---- + +<1> In the table from the SDK documentation, this is the partition reserved for bootloader files. In the default configuration, this partition would contain the FIT image with the kernel image, initramfs, and device tree blob—that’s why there is 100 MB allocated to it. Since we are managing the FIT image with OSTree, though, we actually only use this partition for the initial U-Boot script. It would be theoretically possible to shrink this partition greatly (as the initial script is only a few dozen bytes); we elected to leave it in its manufacturer-specified configuration for ease of use and stability. +<2> This is the only departure from the layout in the table. OSTree needs to manage `/boot`, so we pass the `otaimage` that Yocto creates to the Wic file here. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/simulate-device-basic.adoc b/docs/ota-client-guide/modules/ROOT/pages/simulate-device-basic.adoc index f3f042b2f9..c8fd2de4e9 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/simulate-device-basic.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/simulate-device-basic.adoc @@ -1,4 +1,5 @@ = Simulate a device without building a disk image +:page-partial: ifdef::env-github[] [NOTE] @@ -7,13 +8,13 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: + :page-layout: page :page-categories: [quickstarts] :page-date: 2018-11-19 11:28:47 :page-order: 5 :icons: font -:aktualizr-version: 2019.5 +include::partial$aktualizr-version.adoc[] :sectnums: You can try out OTA Connect without having to build a whole device image. This can be useful for testing the system outside of actual device updates. In this guide, you will install aktualizr (the OTA client software) on your local machine, create a directory with configuration files and credentials for aktualizr to use, and run aktualizr from there. @@ -21,7 +22,7 @@ You can try out OTA Connect without having to build a whole device image. This c == Prerequisites * A Linux or MacOS machine to run aktualizr on -* link:generating-provisioning-credentials.html[Provisioning credentials] (`credentials.zip` file) +* xref:ota-client::generating-provisioning-credentials.adoc[Provisioning credentials] (`credentials.zip` file) == Install aktualizr locally @@ -90,12 +91,12 @@ Build from source:: Modify as needed for your distro. 
See https://github.com/advancedtelematic/aktualizr for further instructions [source,sh,subs="attributes"] ---- -sudo apt install asn1c build-essentialclang clang-format-6.0 clang-tidy-6.0 cmake \ -curl doxygen graphviz lcov libarchive-devlibboost-devlibboost-filesystem-dev \ -libboost-log-dev libboost-program-options-dev libboost-serialization-dev \ -libboost-iostreams-dev libcurl4-openssl-dev libdpkg-dev libostree-dev libp11-2 \ -libp11-dev libpthread-stubs0-dev libsodium-devlibsqlite3-devlibssl-devlibsystemd-dev \ -python3-dev python3-openssl python3-venv sqlite3 valgrind +sudo apt install asn1c build-essential clang clang-format-11 clang-tidy-11 \ +cmake curl doxygen graphviz lcov libarchive-dev libboost-dev \ +libboost-filesystem-dev libboost-log-dev libboost-program-options-dev \ +libcurl4-openssl-dev libostree-dev libp11-3 libp11-dev libpthread-stubs0-dev \ +libsodium-dev libsqlite3-dev libssl-dev python3-dev python3-openssl \ +python3-venv sqlite3 valgrind ---- .Clone the sources from GitHub [source,sh,subs="attributes"] @@ -122,7 +123,7 @@ Each directory should contain the following: * credentials.zip * sota_local.toml -- a config file for aktualizr -* (Optional) A `virtualsec.json` file containing the configuration for one or more secondary ECUs.footnote:[The terms "primary" and "secondary" ECU are used in the Uptane specification. For more information about the difference between primary and secondary ECUs, see our overview of the dev@ota-client::uptane.adoc#_primary_and_secondary_ecus[Uptane framework].] +* (Optional) A `virtualsec.json` file containing the configuration for one or more Secondary ECUs.footnote:[The terms "Primary" and "Secondary" ECU are used in the Uptane specification. For more information about the difference between Primary and Secondary ECUs, see our overview of the xref:ota-client::uptane.adoc#_primary_and_secondary_ecus[Uptane framework\].] 
An example directory is below: @@ -162,8 +163,8 @@ From the directory you've created, run aktualizr and point it to the current dir aktualizr -c . -This will start aktualizr in its normal mode: it will provision with the server using the `credentials.zip` provided, then start listening for updates. You can also xref:aktualizr-runningmodes-finegrained-commandline-control.adoc[selectively trigger aktualizr] or use any of the other options; you just need to specify `-c .` each time. +This will start aktualizr in its normal mode: it will provision with the server using the `credentials.zip` provided, then start listening for updates. You can also xref:ota-client::aktualizr-runningmodes-finegrained-commandline-control.adoc[selectively trigger aktualizr] or use any of the other options; you just need to specify `-c .` each time. -You should now be able to see your simulated device provisioned into your OTA Connect account, with two secondary ECUs listed. +You should now be able to see your simulated device provisioned into your OTA Connect account, with two Secondary ECUs listed. NOTE: Because the aktualizr config we've given uses relative paths, you *must* run aktualizr from the directory you created. However, if you prefer, you can use absolute paths in the config file instead. The reason we've chosen to use relative paths is to make it easy to simulate multiple distinct devices, by simply making a copy of the whole directory. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/simulate-device-cred-provtest.adoc b/docs/ota-client-guide/modules/ROOT/pages/simulate-device-cred-provtest.adoc index 51e2324933..5642f6d765 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/simulate-device-cred-provtest.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/simulate-device-cred-provtest.adoc @@ -7,34 +7,38 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +To provision with device credentials in production, you need to have a root CA. If you want to test this provisioning method without generating a root CA, you can simulate it with the `aktualizr-cert-provider` tool. -To provision with device credentials in production, you need to have a root CA. If you want to test this provisioning method without generating a root CA, you can simulate it with the `aktualizr-cert-provider` command. +To use `aktualizr-cert-provider`, you must still generate a xref:getstarted::generating-provisioning-credentials.adoc[provisioning key] that your devices can share. But with this method, you use the provisioning key to sign the device certificate. -To use the `aktualizr-cert-provider` command, you must still generate a xref:getstarted::generating-provisioning-credentials.adoc[provisioning key] that your devices can share. But with this method, you use the provisioning key to sign the device certificate. - -In production, you would use the root CA to sign the device certificate, but this method is useful for testing. +In production, you should use the root CA to sign the device certificate, but this method is useful for testing. To simulate provisioning with a device certificate, follow these steps: :: -1. Add the following lines to your local.conf: +. Add the following lines to your local.conf: + ---- -SOTA_CLIENT_PROV = "aktualizr-ca-device-prov" +SOTA_CLIENT_PROV = "aktualizr-device-prov" SOTA_DEPLOY_CREDENTIALS = "0" ---- - -1. 
Build a standard image using the bitbake command. -1. Boot the image. +. Build a standard image with bitbake. +. Boot the image. + -The device should not be able to provision with a provisioning key. To verify this, log in to the {product-name} server and make sure that the device does not appear in the list of devices. -1. Load the device credentials on to the device with `aktualizr-cert-provider` command: +The device should not be able to provision itself automatically. To verify this, log in to the {product-name} server and make sure that the device does not appear in the list of devices. +. Load the device credentials on to the device with `aktualizr-cert-provider`: + ---- aktualizr-cert-provider -c credentials.zip -t -d /var/sota/import -r -u ---- + -You can find the link:https://github.com/advancedtelematic/aktualizr/tree/master/src/cert_provider[`aktualizr-cert-provider` source] in the aktualizr repo. You can also find a compiled binary in the host work directory of bitbake. +You can find the link:https://github.com/advancedtelematic/aktualizr/tree/master/src/cert_provider[`aktualizr-cert-provider` source] in the aktualizr repo. You can also find a compiled binary in the host work directory of bitbake. + The path should resemble the following example: + -`tmp/work/x86_64-linux/aktualizr-native/1.0+gitAUTOINC+/build/src/cert_provider/aktualizr-cert-provider`. +`tmp/work/x86_64-linux/aktualizr-native/1.0+gitAUTOINC+/build/src/cert_provider/aktualizr-cert-provider`. 
+ +For more extensive information on provisioning methods and configuration, see the following topics: +* xref:client-provisioning-methods.adoc[Device provisioning methods] +* xref:provisioning-methods-and-credentialszip.adoc[Provisioning methods and credentials.zip] +* xref:enable-device-cred-provisioning.adoc[Enable device-credential provisioning and install device certificates] +* xref:build-configuration.adoc[Build configuration for meta-updater] diff --git a/docs/ota-client-guide/modules/ROOT/pages/software-management.adoc b/docs/ota-client-guide/modules/ROOT/pages/software-management.adoc index be7483a1ed..8f761301b0 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/software-management.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/software-management.adoc @@ -1,4 +1,5 @@ = Software management +:page-partial: ifdef::env-github[] [NOTE] @@ -7,13 +8,13 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] -:page-partial: + When you develop software updates for OTA deployment, it's important to understand the two different ways in which you can get software into OTA Connect. OTA Connect is designed for delivering firmware and/or disk images to automotive electronic control units (ECUs). But because we designed it with auto-industry robustness and security requirements in mind, it's also a great way to deliver updates to any embedded Linux system. To really understand how and why OTA Connect works the way it does, though, it's a good idea to keep automotive use cases in mind. -In general, in a vehicle, we have dozens of individual ECUs, with varying capabilities. In a modern vehicle, that will usually include a small number of high-capability ECUs, including one with a connection to the internet. We call that one the *primary* ECU, and all the others *secondary* ECUs. For OTA software updates, it's the job of the primary to download and check updates for itself, and for all the other ECUs in the vehicle. 
The secondaries get their updates from the primary, along with metadata that allows them to check for themselves whether the update is valid. +In general, in a vehicle, we have dozens of individual ECUs, with varying capabilities. In a modern vehicle, that will usually include a small number of high-capability ECUs, including one with a connection to the internet. We call that one the *Primary* ECU, and all the others *Secondary* ECUs. For OTA software updates, it's the job of the Primary to download and check updates for itself, and for all the other ECUs in the vehicle. The Secondaries get their updates from the Primary, along with metadata that allows them to check for themselves whether the update is valid. Broadly speaking, there are two types of software image that you can upload to your personal software repository on OTA Connect: @@ -28,8 +29,8 @@ When you build a disk image for your embedded device, you normally get a file, l == Uploading other image files -OTA Connect is also designed to enable secure updates to secondary ECUs. This can, of course, include non-Linux systems, and even tiny microcontrollers with no operating system. For these devices, obviously, a filesystem image doesn't make sense. That's why you can xref:dev@ota-web::upload-software-ui.adoc[upload software packages for this type of ECU directly in the OTA Connect portal]. When you do, you'll specify exactly which type of ECU the software is for, to make sure you don't send the wrong type of image. +OTA Connect is also designed to enable secure updates to Secondary ECUs. This can, of course, include non-Linux systems, and even tiny microcontrollers with no operating system. For these devices, obviously, a filesystem image doesn't make sense. That's why you can xref:ota-web::upload-software-ui.adoc[upload software packages for this type of ECU directly in the OTA Connect portal]. 
When you do, you'll specify exactly which type of ECU the software is for, to make sure you don't send the wrong type of image. === Using the file upload for things that aren't ECUs -Even though this functionality was developed for delivering firmware to secondary ECUs, you can use it to send other kinds of updates as well, even for a single device. For example, you might have a data partition, software packages, or apps that you want to send updates to independently from the base system. Or, you might want to create a virtual secondary for testing, and have it just drop the file you uploaded somewhere on the filesystem. +Even though this functionality was developed for delivering firmware to Secondary ECUs, you can use it to send other kinds of updates as well, even for a single device. For example, you might have a data partition, software packages, or apps that you want to send updates to independently from the base system. Or, you might want to create a virtual Secondary for testing, and have it just drop the file you uploaded somewhere on the filesystem. 
diff --git a/docs/ota-client-guide/modules/ROOT/pages/supported-boards.adoc b/docs/ota-client-guide/modules/ROOT/pages/supported-boards.adoc index 3f0228a79f..e07e6dfd9b 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/supported-boards.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/supported-boards.adoc @@ -8,12 +8,14 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -* https://github.com/advancedtelematic/meta-updater-raspberrypi[Raspberry Pi 2 and 3] +* https://github.com/advancedtelematic/meta-updater-raspberrypi[Raspberry Pi 2, 3, and 4] (4 beginning with warrior) * https://github.com/advancedtelematic/meta-updater-minnowboard[Intel Minnowboard] * https://github.com/advancedtelematic/meta-updater-qemux86-64[Native QEMU emulation] * Renesas R-Car H3 and M3 * https://github.com/advancedtelematic/meta-updater-ti/[TI BeagleBone Black] (rocko only, using TI SDK 05.03) * https://github.com/advancedtelematic/meta-updater-ti/[TI AM65x industrial development kit] (rocko only, using TI SDK 05.03) +* https://github.com/advancedtelematic/meta-updater-ti/[TI AM65x industrial development kit] (rocko only, using TI SDK 05.03) +* https://github.com/advancedtelematic/meta-updater/pull/741[NXP LS1043ARDB] (warrior only) Additionally, there is community support for https://github.com/ricardosalveti/meta-updater-riscv[RISC-V] boards, in particular the Freedom U540. @@ -21,12 +23,8 @@ We also historically supported the https://github.com/advancedtelematic/meta-upd == Adding support for your board -If your board isn't supported yet, you can add board integration code yourself. The main purpose of this code is to provide a bootloader that will be able to use https://ostree.readthedocs.io/en/latest/manual/atomic-upgrades/[OSTree's boot directory]. In the meta-updater integration layers we have written so far, the basic steps are: - -1. Make the board boot into http://www.denx.de/wiki/U-Boot[U-Boot] -2. 
Make U-boot import variables from /boot/loader/uEnv.txt and load the kernel with initramfs and kernel command line arguments according to what is set in this file. +If your board isn't supported yet, you can add board integration code yourself. To do this, follow our xref:bsp-integration.adoc[BSP integration guide]. -Take a look at the https://github.com/advancedtelematic/meta-updater-minnowboard[Minnowboard] or https://github.com/advancedtelematic/meta-updater-raspberrypi[Raspberry Pi] integration layers for examples. If you want our developers to add support for your board, contact us at mailto:otaconnect.support@here.com[] and we can discuss a potential NRE (Non-recurring Engineering) agreement. @@ -35,18 +33,18 @@ If you want our developers to add support for your board, contact us at mailto:o Although we have focused on U-Boot and GRUB so far, other bootloaders can be configured to work with OSTree as well. If you want to use a different bootloader, contact us at mailto:otaconnect.support@here.com[]. ==== -Your images will also need network connectivity to be able to reach an actual OTA backend. Our 'poky-sota' distribution does not mandate or install a default network manager but our supported platforms use the `virtual/network-configuration` recipe, which can be used as a starting example. +Your images will also need network connectivity to be able to reach an actual OTA backend. Our `poky-sota-systemd` distribution does not mandate or install a default network manager but our supported platforms use the `network-configuration` recipe, which can be used as a starting example. == Minimum hardware requirements for controllers (ECUs) -The aktualizr binary is a lightweight {cpp} application between 2-5 MB in size. It uses a minimum amount of resources when idle. +The aktualizr binary is a lightweight {cpp} application between 2-5 MB in size. It uses around 20 MB RAM, although some of that comes from shared libraries. 
It uses a minimum amount of other resources when idle. -The following hardware is required for your primary ECU: +The following hardware is required for your Primary ECU: -* At a minimum, aktualizr needs **16 MB of RAM** and **128 MB of storage** to run. -* We recommend that you use a controller with **128 MB of RAM** and **512 MB storage** -- especially if you if your plan to process large, complex sofware updates. +* At a minimum, the board or device aktualizr is running on will need **64 MB of RAM** and **128 MB of storage**. +* We recommend using a board with slightly higher specs, however. **256 MB of RAM** and **512 MB storage** are reasonable targets, especially if you plan to process large, complex software updates. The aktualizr process itself won't consume significantly more RAM with larger updates, but the filesystem cache memory usage might increase. [NOTE] ==== -If you plan to send updates to secondary, low-performance ECUs, you can also use a more minimal implementaton called link:https://github.com/advancedtelematic/uptiny[`libuptiny`]. This utility is only 10 KB and performs a minimal verification of software metadata that is less resource intensive. For more information on `libuptiny`, contact us at mailto:otaconnect.support@here.com[]. +If you plan to send updates to low-performance Secondary ECUs, you can also use a more minimal implementation called link:https://github.com/advancedtelematic/uptiny[`libuptiny`]. This utility is only 10 KB and performs a minimal verification of software metadata that is less resource intensive. For more information on `libuptiny`, contact us at mailto:otaconnect.support@here.com[]. 
==== diff --git a/docs/ota-client-guide/modules/ROOT/pages/troubleshooting-bsp-integration.adoc b/docs/ota-client-guide/modules/ROOT/pages/troubleshooting-bsp-integration.adoc new file mode 100644 index 0000000000..231d57caa9 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/troubleshooting-bsp-integration.adoc @@ -0,0 +1,43 @@ += Troubleshooting BSP Integration +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + +include::partial$aktualizr-version.adoc[] + +== U-Boot loading the wrong config file + +U-Boot finds its initial configuration file by looping through the partitions in order and searching for `uEnv.txt` first in `/`, then in `/boot/`. If U-Boot isn’t loading the correct initial file, you should check the partitions layout, and see if there’s a vendor-provided `uEnv.txt` in a partition that comes physically before the one you’ve placed the OSTree initial `uEnv.txt`. + + +== Vendor's environment setup script overrides values that meta-updater has set. + +Values specified in `local.conf` have the highest priority in a Yocto build. Most vendors provide a setup script for the build environment that also generates a `local.conf` with many values pre-populated. In some cases, those values can conflict with changes that the meta-updater makes. + + +For the LS1043ARDB, the environment setup script added a line to the `local.conf` that overrode the `INITRAMFS_IMAGE` value. To fix this, you need to specify the correct value in your `local.conf` when you build: +[source,bash] +---- +INITRAMFS_IMAGE = "initramfs-ostree-image" +---- + +== Further Information + +Building an integration with a new board involves dealing with several different systems. 
See the following links for more information on this: + +* https://www.yoctoproject.org/docs/{yocto-version}/mega-manual/mega-manual.html[Yocto Mega Manual, v{yocto-version}]: The Yocto Mega Manual is a concatenation of all the various other reference manuals; it’s usually better to use the individual manuals if you know what you’re looking for. In particular, these three are the most frequently used in the BSP development domain: +** https://www.yoctoproject.org/docs/{yocto-version}/ref-manual/ref-manual.html[Yocto Reference Manual, v{yocto-version}] +** https://www.yoctoproject.org/docs/{yocto-version}/bsp-guide/bsp-guide.html[Yocto BSP Developer's Guide, v{yocto-version}] +** https://www.yoctoproject.org/docs/{yocto-version}/bitbake-user-manual/bitbake-user-manual.html[Bitbake User Manual, v{yocto-version}] + +* https://ostreedev.github.io/ostree/[libostree reference documentation] +** https://ostreedev.github.io/ostree/deployment/[Deployments] +** https://ostreedev.github.io/ostree/atomic-upgrades/[Atomic Upgrades] + +* https://www.denx.de/wiki/Knowhow/DULG/Manual[U-Boot reference documentation] +** https://www.denx.de/wiki/Knowhow/DULG/UBootCommandLineInterface[CLI] +** https://www.denx.de/wiki/Knowhow/DULG/UBootScripts[Scripting] diff --git a/docs/ota-client-guide/modules/ROOT/pages/troubleshooting.adoc b/docs/ota-client-guide/modules/ROOT/pages/troubleshooting.adoc index 23268a59ab..a360a39937 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/troubleshooting.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/troubleshooting.adoc @@ -7,6 +7,8 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} ==== endif::[] +include::partial$aktualizr-version.adoc[] + :page-layout: page :page-categories: [tips] :page-date: 2017-06-13 10:51:53 @@ -60,6 +62,10 @@ source meta-updater/scripts/envsetup.sh qemux86-64 Depending on your use case, however, it often makes more sense to just keep separate project directories for your separate 
architectures, and share the state cache and downloads directories between them. +=== Build fails with Python errors + +Ubuntu users that encounter an error due to missing `Python.h` should install `libpython2.7-dev` on their host machine. + == Runtime problems === File ownership incorrect after an update (static UID to name mapping) @@ -74,7 +80,7 @@ This is required if you have a service that does all of the following: * Runs as an account that is not one of the 'standard' accounts (`root`, `floppy`, `man`, `tape`, etc) * The recipe for the service creates the user (using `useradd`) -In this case, you should set `useradd`. The link:https://www.yoctoproject.org/docs/2.6/mega-manual/mega-manual.html#ref-classes-useradd[useradd] section of the Yocto Mega Manual describes this process in more detail. +In this case, you should set `useradd`. The link:https://www.yoctoproject.org/docs/{yocto-version}/mega-manual/mega-manual.html#ref-classes-useradd[useradd] section of the Yocto Mega Manual describes this process in more detail. .Don't see your problem here? diff --git a/docs/ota-client-guide/modules/ROOT/pages/update-single-device.adoc b/docs/ota-client-guide/modules/ROOT/pages/update-single-device.adoc index 568cdfeef4..136c0b64b5 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/update-single-device.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/update-single-device.adoc @@ -8,13 +8,13 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -Once you've uploaded a few different software versions, you can try to push that software to a device that doesn't yet have the version installed. +Once you've uploaded a few different software versions, you can try to push that software to a device that doesn't yet have the version installed. -If you have more than one Raspberry Pi lying around, you could use the disk image that you created in the xref:build-raspberry.adoc[Raspberry Pi build instructions]. 
Flash that image to your second Raspberry Pi and boot it up. Otherwise, you could xref:build-qemu.adoc#_run_the_built_image_with_qemu[start another QEMU instance] based on the same disk image but with a different mac address. +If you have more than one Raspberry Pi lying around, you could use the disk image that you created in the xref:build-raspberry.adoc[Raspberry Pi build instructions]. Flash that image to your second Raspberry Pi and boot it up. Otherwise, you could xref:build-qemu.adoc#_run_the_built_image_with_qemu[start another QEMU instance] based on the same disk image but with a different MAC address. After your second device boots, it should automatically provision with OTA Connect. -* Log in to the xref:https://connect.ota.here.com[OTA Connect Portal] and check that second devices shows up in the last of mostly recently provision devices. +* Log in to the https://connect.ota.here.com[OTA Connect Portal] and check that the second device shows up in the list of most recently provisioned devices. + This list appears on the Dashboard that you see after you log in. If you see your second device, you're ready to send it an update. diff --git a/docs/ota-client-guide/modules/ROOT/pages/update-your-client-configuration.adoc b/docs/ota-client-guide/modules/ROOT/pages/update-your-client-configuration.adoc deleted file mode 100644 index a43de050bb..0000000000 --- a/docs/ota-client-guide/modules/ROOT/pages/update-your-client-configuration.adoc +++ /dev/null @@ -1,44 +0,0 @@ -= Recommended client configurations -ifdef::env-github[] - -[NOTE] -==== -We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. -==== -endif::[] - - -Before you start developing or deploying to production, you should check that your configuration file has appropriate settings for your use case. 
- -For more details about the full client configuration settings, see the xref:aktualizr-config-options.adoc[client configuration reference]. - -== Recommended settings for development - -[cols="1,1,1a",options="header,footer"] -|==================== -|Name | Default | Description -|`polling_sec` | `10` | -Interval between polls (in seconds). - -The default polling internal designed to make it convenient for you test and develop OTA update functions. -|`force_install_completion` | `true` | -Forces installation completion. Causes a system reboot in case of an ostree package manager. Emulates a reboot in case of a fake package manager. - -You'll want to set this to `true` when developing because it's more convenient. - -|==================== - -== Recommended settings for production - -[cols="1,1,1a",options="header,footer"] -|==================== -|Name | Default | Description -|`polling_sec` | `86400` | When moving to production you'll want to have a much longer interval. -In fact, for production, we don't support intervals less the 1 hour (3,600 seconds). Longer internals help you to reduce the internet bandwidth and power consumption for your devices. - -We recommend an internal between 1 and 7 days (86,400 to 604,800 seconds). -|`force_install_completion` | `false` | -If you followed our recommendation to enable automatic rebooting for development, you should turn it off again for production. -|==================== - - diff --git a/docs/ota-client-guide/modules/ROOT/pages/upload-large-binary.adoc b/docs/ota-client-guide/modules/ROOT/pages/upload-large-binary.adoc new file mode 100644 index 0000000000..cb7388c3c9 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/upload-large-binary.adoc @@ -0,0 +1,92 @@ += Upload a binary file +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. 
+==== +endif::[] + +If you want to upload large binary files (up to 3 GB) or your xref:ota-client::rotating-signing-keys.adoc[software signing keys are offline], use the `garage-sign` tool on the command line. + +NOTE: You can upload software packages of up to 1 GB on the OTA Connect Portal. For instructions, see xref:ota-web::upload-software-ui.adoc[Upload software versions] in the OTA Connect User Guide. + +*To upload a binary file using `garage-sign`:* + +// tag::gs-initialize[] +. Make sure you have the link:https://tuf-cli-releases.ota.here.com/index.html[latest version,window="_blank"] of the `garage-sign` tool. +. Get the .zip file with your provisioning credentials. ++ +For instructions, see the xref:ota-client::generating-provisioning-credentials.adoc[related] section in this guide. + +. Initialize a local repository. ++ +NOTE: For safety reasons, we recommend xref:keep-local-repo-on-external-storage.adoc[keeping your local repository on an external storage device]. ++ +[source, bash] +---- +garage-sign init \ + --repo \ + --credentials +---- +// end::gs-initialize[] + +// tag::gs-pull-targets[] +. Pull the latest version of the `targets.json` file. ++ +[source,bash] +---- +garage-sign targets pull \ + --repo +---- +// end::gs-pull-targets[] + +. To upload your binary to OTA Connect, specify its name and version. ++ +[source,bash] +---- +garage-sign targets upload \ + --repo \ + --input \ + --name \ + --version +---- ++ +include::garage-sign-reference.adoc[tags=targets-upload-note] + +. If the upload is successful, add it to your local Targets metadata. ++ +The binary name and version must be the same as in step 5. ++ +[source,bash] +---- +garage-sign targets add-uploaded \ + --repo \ + --input \ + --name \ + --version \ + --hardwareids , +---- + +// tag::gs-sign-targets[] +. Sign the new `targets.json` file with your Targets key. 
++ +[source,bash] +---- +garage-sign targets sign \ + --repo \ + --key-name mytargets +---- +// end::gs-sign-targets[] + +// tag::gs-push-targets[] +. Push the new `targets.json` file to OTA Connect. ++ +[source,bash] +---- +garage-sign targets push \ + --repo +---- +// end::gs-push-targets[] + +To learn more about the `garage-sign` commands and options, see its xref:garage-sign-reference.adoc[reference] documentation. diff --git a/docs/ota-client-guide/modules/ROOT/pages/uptane-generator.adoc b/docs/ota-client-guide/modules/ROOT/pages/uptane-generator.adoc index b28ab9b05c..c353feb3ae 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/uptane-generator.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/uptane-generator.adoc @@ -49,7 +49,7 @@ Make sure that the repository path doesn't already exist and the machine where ` uptane-generator --path --command generate ``` -2. Add a target to the images metadata: +2. Add a target to the Image repo metadata: + ``` uptane-generator --path --command image --filename --targetname --hwid @@ -57,7 +57,7 @@ uptane-generator --path --command image --filename --ta + This step can be repeated as many times as necessary for each target. `--targetname` is optional. If it is not provided, it is assumed to be the same as the image name provided to `--filename`. -3. Prepare director targets metadata for a given device: +3. 
Prepare Director Targets metadata for a given device: + ``` uptane-generator --path --command addtarget --targetname --hwid --serial @@ -65,7 +65,7 @@ uptane-generator --path --command addtarget --targetname --command signtargets @@ -87,19 +87,19 @@ uptane-generator --path --command image --filename --ta ==== Generating metadata without a real file -To add a target to the images metadata without providing an actual file, you can supply alternative parameters to the `image` command: +To add a target to the Image repo metadata without providing an actual file, you can supply alternative parameters to the `image` command: ``` uptane-generator --path --command image --targetname --targetsha256 --targetsha512 --targetlength --hwid ``` -==== Advanced director metadata control +==== Advanced Director metadata control -To reset the director targets metadata or to prepare empty targets metadata, use the `emptytargets` command. If you then sign this metadata with `signtargets`, it will schedule an empty update. +To reset the Director Targets metadata or to prepare empty Targets metadata, use the `emptytargets` command. If you then sign this metadata with `signtargets`, it will schedule an empty update. ``` uptane-generator --path --command emptytargets ``` -To populate the director targets metadata with the currently signed metadata (with the previous signature removed), use the `oldtargets` command. You can then add more targets with `addtarget` and re-sign with `signtargets`. +To populate the Director Targets metadata with the currently signed metadata (with the previous signature removed), use the `oldtargets` command. You can then add more targets with `addtarget` and re-sign with `signtargets`. 
``` uptane-generator --path --command oldtargets ``` @@ -113,7 +113,7 @@ uptane-generator --path --command sign --repotype - ==== Add custom URLs -To add a custom URL to an image in the Targets metadata of the Images repository: +To add a custom URL to an image in the Targets metadata of the Image repository: ``` uptane-generator --path --command image --filename --targetname --hwid --url ``` diff --git a/docs/ota-client-guide/modules/ROOT/pages/uptane.adoc b/docs/ota-client-guide/modules/ROOT/pages/uptane.adoc index 5feb28e332..d039e496e3 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/uptane.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/uptane.adoc @@ -40,11 +40,11 @@ To see an example of this metadata, open the link:https://raw.githubusercontent. * `targets.json`: + -The instance of `targets.json` in your image repository contains information about all the valid software files. +The instance of `targets.json` in your image repository contains information about all the valid software files. + As mentioned previously, there are two versions of each metadata file, one in your image repository and one in the Director service. + -The _other_ instance of `targets.json` is in the Director service. It only contains information about the software files that are included in a specific update. The entries in this file are cross-referenced with the corresponding entries in `targets.json` for your image repository. +The _other_ instance of `targets.json` is in the Director service. It only contains information about the software files that are included in a specific update. The entries in this file are cross-referenced with the corresponding entries in `targets.json` for your image repository. + Basically this comparison is answering the question: + @@ -58,10 +58,10 @@ OTA Connect manages the Director for you. 
When you create an update campaign, we The keys for the metadata in your image repository should be managed on your side; we provide tooling to help you do just that. When you build a new device, bitbake automatically signs the image for you, using keys that you specify in the build's `local.conf`{zwsp}footnote:[These keys are packed inside the zip file specified by the `SOTA_PACKED_CREDENTIALS` line.]. Your initial key is created by HERE OTA Connect, delivered to you inside your `credentials.zip` file, and kept online for convenience and bootstrapping; for any production use you should xref:rotating-signing-keys.adoc[manage the keys for software metadata] yourself and store them securely offlinefootnote:[Proceed with caution! Once you take the key offline, HERE Technologies cannot recover it.]. -== Primary and secondary ECUs +== Primary and Secondary ECUs -In the Uptane framework, an ECU is categorized as either a primary or a secondary ECU. In most cases, a vehicle has one primary ECU and several secondary ECUs. The primary ECU is responsible for downloading and distributing software to the secondary ECUs. In many cases, the Telematics Control Unit (TCU) serves the role of primary ECU. A primary ECU also verifies and distributes the Uptane-compliant metadata associated with each piece of software. +In the Uptane framework, an ECU is categorized as either a Primary or a Secondary ECU. In most cases, a vehicle has one Primary ECU and several Secondary ECUs. The Primary ECU is responsible for downloading and distributing software to the Secondary ECUs. In many cases, the Telematics Control Unit (TCU) serves the role of Primary ECU. A Primary ECU also verifies and distributes the Uptane-compliant metadata associated with each piece of software. Secondary ECUs, such as the Transmission or Body control modules, receive the software and should also perform some form of metadata verification. 
If the ECU has sufficient processing capabilities, it should perform a full verification of the Uptane-compliant metadata, otherwise it should at least perform a partial verification. -To get an overview of the ECUs detected in a device, open the OTA Connect web application and navigate to the device details for the device. This view shows the ECUs grouped according to whether they are primary and secondary ECUs. +To get an overview of the ECUs detected in a device, open the OTA Connect web application and navigate to the device details for the device. This view shows the ECUs grouped according to whether they are Primary and Secondary ECUs. diff --git a/docs/ota-client-guide/modules/ROOT/pages/use-your-own-deviceid.adoc b/docs/ota-client-guide/modules/ROOT/pages/use-your-own-deviceid.adoc index ef40c3d907..b159a5f002 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/use-your-own-deviceid.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/use-your-own-deviceid.adoc @@ -8,32 +8,24 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -In OTA Connect, a device has two types of identifier: an internal device UUID, and a standard device ID. +In OTA Connect, a device has two types of identifiers: an internal device UUID and a standard device ID. By default, OTA Connect generates a random device ID for you, but you can also create your own device ID. You can later use this unique device ID to xref:ota-web::create-custom-device-fields.adoc[create custom device fields] and xref:ota-web::create-smart-group.adoc[group devices]. -// MC: use xref:otaconnect-identifiers.adoc[identifier]: when topic is finished -By default, OTA Connect generates a random device ID for you, but you can override this behavior. +You can create a custom device ID in one of the following ways: -The standard way to specify device identity is in the device certificate -- specifically, in the Subject Distinguished Name (DN) field. 
+* Specify your unique device ID in the aktualizr configuration. For more information, see the xref:aktualizr-config-options.adoc#_provision[reference documentation]. -This method requires that you use device-credential provisioning which tells OTA Connect to use your device certificates for authentication. This method is a little more complicated than the "shared-credential" provisioning method that you might have used in the "Get Started" guide. - -However you can try our test procedures to specify your own Device IDs. Earlier in this guide, we showed you how to xref:generate-devicecert.adoc[generate and sign a device certificate] using a self-signed root certificate. - -* You can define your own device ID in the device certificate, by changing step 1 of that procedure - -** Replace the existing sample command: -+ -`export device_id=${DEVICE_ID:-${DEVICE_UUID}}` -+ -Update the command with your device ID instead: +* If you use the device-credential provisioning method, when you xref:generate-devicecert.adoc[generate a device certificate], update the following command with your custom device ID: + -`export device_id=` +``` +export device_id=${DEVICE_ID:-$\{DEVICE_UUID}} +``` + -For example: -`export device_id=1HTHCATR81H391382` - -** You can then continue the test procedure to provision your device. - -* If you already have your own procedure for generating device certificates, then you're probably already using your own device IDs to bind each certificate to the device. +.Custom device ID +==== +You want to use the following link:https://en.wikipedia.org/wiki/Vehicle_identification_number[vehicle identification number (VIN)] as your device ID: `SAJWA1C78D8V38055`. +``` +export device_id=SAJWA1C78D8V38055 +``` +==== -In any case, OTA Connect uses the device ID that you've defined in your device certificates instead of automatically generating one. 
\ No newline at end of file +OTA Connect will not generate a new device ID but use the device ID that you have defined in your device certificates. diff --git a/docs/ota-client-guide/modules/ROOT/pages/useful-bitbake-commands.adoc b/docs/ota-client-guide/modules/ROOT/pages/useful-bitbake-commands.adoc index dfa19783a2..4e7d1fa0a7 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/useful-bitbake-commands.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/useful-bitbake-commands.adoc @@ -15,7 +15,7 @@ endif::[] You're going to run into Yocto build problems eventually--that's a given. We provide here a guide to a few bitbake commands we find particularly useful. -=== Clean the build environment +== Clean the build environment Yocto does a lot of caching and tries to be as smart about it as possible. But if you're running into build problems for a particular package, a good first start is @@ -23,42 +23,42 @@ Yocto does a lot of caching and tries to be as smart about it as possible. But i This will remove everything in the work directory, everything in the state cache, and all previously downloaded source files. -=== View the actual build environment bitbake will execute +== View the actual build environment bitbake will execute bitbake -e [package] > env.log This is the bazooka of bitbake troubleshooting. It will output the entire build environment that bitbake uses for that package or image. This output will be tens of thousands of lines long, but can be grepped for what you need. -=== Launch the bitbake devshell for a package +== Launch the bitbake devshell for a package bitbake -c devshell [package] The devshell is the scalpel of bitbake troubleshooting. This command will open a new terminal in the package's build directory with bitbake's environment set up, after the source files have been fetched and all compile-time dependencies have been built, but before any configure/compile steps for the package have been taken. 
From here, you can troubleshoot specific problems with your build. -=== Launch the dependency explorer for a package +== Launch the dependency explorer for a package bitbake [package] -g -u depexp A GUI tool for exploring package dependencies. -=== Show the layers currently in your build +== Show the layers currently in your build bitbake-layers show-layers Outputs a list of the layers currently in use, and their priorities. If a package exists in two or more layers, it will be build from the layer with higher priority. -=== Show all available recipes +== Show all available recipes bitbake-layers show-recipes -=== List all packages that will be built in an image/package +== List all packages that will be built in an image/package bitbake -g [package] && cat pn-depends.dot | grep -v -e '-native' | \ grep -v digraph | grep -v -e '-image' | awk '{print $1}' | sort | uniq A concise text dump of all of the dependencies of a package. Includes both runtime and compile-time dependencies. -=== Save verbose build log +== Save verbose build log bitbake -v [package] 2>&1 | tee build.log diff --git a/docs/ota-client-guide/modules/ROOT/pages/virtual-secondaries.adoc b/docs/ota-client-guide/modules/ROOT/pages/virtual-secondaries.adoc new file mode 100644 index 0000000000..42f8eb6c42 --- /dev/null +++ b/docs/ota-client-guide/modules/ROOT/pages/virtual-secondaries.adoc @@ -0,0 +1,44 @@ += Use of Virtual Secondaries +ifdef::env-github[] + +[NOTE] +==== +We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname}.html[view this article in our documentation portal]. Not all of our articles render correctly in GitHub. +==== +endif::[] + + +Virtual Secondaries can be used as a way to update a configuration file on the Primary (where the Primary is an ECU with a standard OSTree image running aktualizr). You can do this in two parts: + +* First you need a virtual Secondary configured that can be used to update a configuration file. 
+* Afterwards you can integrate it into your Yocto build. + + + +== Creating a Virtual Secondary + +. You can create a virtual Secondary to run locally by following the steps on how to xref:simulate-device-basic.adoc[simulate a device without building a disk image]. ++ +You can use the provided `.deb` releases mentioned and the `virtualsec.json` file, which you can edit to suit your needs, such as removing one of the two entries and setting the paths and names to whatever you prefer. +. Afterwards, you will be able to download the arbitrary files to the location specified in the `firmware_path` variable. +. Next, you can upload the files for the virtual Secondary. To do so, go to the *Software versions* tab in the https://connect.ota.here.com[OTA Connect Portal] and click *Add Software*. You should make sure the *ECU Types* field matches the value specified in the `ecu_hardware_id` variable of the `virtualsec.json` file. ++ +NOTE: `ecu_hardware_id` must be unique for each file type that you'd like to deliver, otherwise an update of one of them will rewrite the others. + + +== Updating via Virtual Secondaries + +To issue the update, perform the following steps: + +. Go to your *Devices* tab and find your Primary. (You can run `aktualizr-info` locally to get the name of the Primary if you don't know it.) +. Afterwards, select your virtual Secondary and the file you uploaded to the *Software versions* tab. +. After your selection, the Primary will begin to download the file. ++ +For future reference, you can use the *Campaigns* tab to send updates to multiple devices simultaneously. + + + +== Integrating Virtual Secondaries into Yocto + +You can use the files in the repository https://github.com/advancedtelematic/meta-updater/blob/master/recipes-sota/config/aktualizr-virtualsec.bb[here] as an example to get the virtual Secondaries to work in Yocto. You can use the files directly or you can copy them to one of your layers and modify them according to your needs. 
+ diff --git a/docs/ota-client-guide/modules/ROOT/pages/workflow-overview.adoc b/docs/ota-client-guide/modules/ROOT/pages/workflow-overview.adoc index 88a9ee3de9..d7798ccb45 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/workflow-overview.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/workflow-overview.adoc @@ -49,14 +49,14 @@ This is all done with the meta-updater Yocto layer, as an integrated part of the == Installing updates on devices -In OTA Connect, we generally assume that updates will be OSTree images, but in fact OTA Connect can be used to send all kinds of other updates.footnote:[For more information on using OTA Connect for other types of update, please contact us at link:mailto:otaconnect.support@here.com[otaconnect.support@here.com].] But for now, let's look at the default OSTree update process. +In OTA Connect, we generally assume that updates will be OSTree images or complete firmware images to flash onto Secondary ECUs, but in fact OTA Connect can be used to send all kinds of other updates.footnote:[For more information on using OTA Connect for other types of update, please contact us at link:mailto:otaconnect.support@here.com[otaconnect.support@here.com].] For now, let's look at the default OSTree update process. .OTA Connect device update flow **** . The client polls OTA Connect servers periodically to check if there are any new updates. -. If there are, the client requests to download the update, which consists of a metadata file pointing to a particular OSTree commit. (For other types of update, the download might be something else, like application binaries, installation packages containing binaries and install scripts, or data packs.) -. The update is cryptographically checked for validity following the https://uptane.github.io[Uptane] https://uptane.github.io/uptane-standard/uptane-standard.html[specification]. -. 
The client checks if the reference is available locally; if it's not, the commit is downloaded from TreeHub, only actually downloading objects not already present in the repo. +. If there are, the client receives a metadata file directing it to install a particular image. +. The metadata is cryptographically checked for validity following the https://uptane.github.io[Uptane] https://uptane.github.io/uptane-standard/uptane-standard.html[specification]. +. If all the checks pass, the client downloads the image. In the case of an OSTree update, the commit representing the image is downloaded from TreeHub, which only actually requires downloading any objects missing on the client. . Each object's SHA256 is checked for correctness. . Once all objects are downloaded and verified, a flag is set telling OSTree to boot into the new filesystem the next time the device restarts. **** diff --git a/docs/ota-client-guide/modules/ROOT/pages/yocto-release-branches.adoc b/docs/ota-client-guide/modules/ROOT/pages/yocto-release-branches.adoc index b1aa63bb40..8811a2d10d 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/yocto-release-branches.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/yocto-release-branches.adoc @@ -10,8 +10,10 @@ endif::[] == Supported branches -Yocto has a number of release branches. Their details are documented in the https://wiki.yoctoproject.org/wiki/Releases[Yocto wiki]. HERE OTA Connect currently actively supports the following branches: +Yocto has a number of release branches. Their details are documented in the https://wiki.yoctoproject.org/wiki/Releases[Yocto wiki]. 
HERE OTA Connect currently supports the following branches: +* dunfell +* zeus * warrior * thud diff --git a/docs/ota-client-guide/modules/ROOT/pages/yocto.adoc b/docs/ota-client-guide/modules/ROOT/pages/yocto.adoc index e90291c1e4..6062905360 100644 --- a/docs/ota-client-guide/modules/ROOT/pages/yocto.adoc +++ b/docs/ota-client-guide/modules/ROOT/pages/yocto.adoc @@ -1,4 +1,4 @@ -== Yocto += Yocto ifdef::env-github[] [NOTE] @@ -8,7 +8,7 @@ We recommend that you link:https://docs.ota.here.com/ota-client/latest/{docname} endif::[] -The link:https://www.yoctoproject.org/[Yocto Project] is an open source collaborative project that provides standardized high-quality infrastructure, tools, and methodology to help decrease the complexity and increase the portability of Linux implementations in the embedded industry. It enables its users to build custom operating systems using specific recipes for embedded devices. Most commercial embedded Linux distros already use and/or support Yocto, including link:https://www.windriver.com/products/linux/[Wind River] and link:http://www.enea.com/solutions/Enea-Linux/[Enea]. It's backed by major hardware vendors like Intel, AMD, Freescale, Mentor, Texas Instruments, and many others. If you need a highly performant customized Linux for your embedded device, whether it's IoT, automotive, or other kinds of mobility devices, the Yocto project is probably what you're using. +The link:https://www.yoctoproject.org/[Yocto Project] is an open source collaborative project that provides standardized high-quality infrastructure, tools, and methodology to help decrease the complexity and increase the portability of Linux implementations in the embedded industry. It enables its users to build custom operating systems using specific recipes for embedded devices. 
Most commercial embedded Linux distros already use and/or support Yocto, including link:https://www.windriver.com/products/linux/[Wind River] and link:https://www.enea.com/products-services/operating-systems/enea-linux/[Enea]. It's backed by major hardware vendors like Intel, AMD, Freescale, Mentor, Texas Instruments, and many others. If you need a highly performant customized Linux for your embedded device, whether it's IoT, automotive, or other kinds of mobility devices, the Yocto project is probably what you're using. HERE Technologies has created a https://github.com/advancedtelematic/meta-updater/[meta-updater layer] for Yocto, making it easy to get over-the-air update support into your devices. In many cases, it's as simple as adding meta-updater and a board support integration layer to your project and re-running bitbake. The main features of the meta-updater layer are OSTree and our OTA update client, aktualizr. OSTree handles the filesystem versioning, and aktualizr communicates with the server, downloads updates, and cryptographically verifies them following the xref:uptane.adoc[Uptane framework]. 
diff --git a/docs/posix-secondaries-bitbaking.adoc b/docs/posix-secondaries-bitbaking.adoc deleted file mode 120000 index 26095edac1..0000000000 --- a/docs/posix-secondaries-bitbaking.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/posix-secondaries-bitbaking.adoc \ No newline at end of file diff --git a/docs/posix-secondaries.adoc b/docs/posix-secondaries.adoc deleted file mode 120000 index b053868a38..0000000000 --- a/docs/posix-secondaries.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/posix-secondaries.adoc \ No newline at end of file diff --git a/docs/provision-with-device-credentials.adoc b/docs/provision-with-device-credentials.adoc deleted file mode 120000 index efce71f879..0000000000 --- a/docs/provision-with-device-credentials.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/enable-device-cred-provisioning.adoc \ No newline at end of file diff --git a/docs/provisioning-methods-and-credentialszip.adoc b/docs/provisioning-methods-and-credentialszip.adoc deleted file mode 120000 index a75bb08afd..0000000000 --- a/docs/provisioning-methods-and-credentialszip.adoc +++ /dev/null @@ -1 +0,0 @@ -ota-client-guide/modules/ROOT/pages/provisioning-methods-and-credentialszip.adoc \ No newline at end of file diff --git a/docs/rollback.adoc b/docs/rollback.adoc deleted file mode 120000 index c6e5dada12..0000000000 --- a/docs/rollback.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/rollback.adoc \ No newline at end of file diff --git a/docs/schema-migrations.adoc b/docs/schema-migrations.adoc deleted file mode 120000 index 05b57e57ac..0000000000 --- a/docs/schema-migrations.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/schema-migrations.adoc \ No newline at end of file diff --git a/docs/security.adoc b/docs/security.adoc deleted file mode 120000 index acb44397f7..0000000000 --- a/docs/security.adoc +++ /dev/null @@ -1 +0,0 @@ 
-./ota-client-guide/modules/ROOT/pages/security.adoc \ No newline at end of file diff --git a/docs/selectively-triggering-aktualizr.adoc b/docs/selectively-triggering-aktualizr.adoc deleted file mode 120000 index 1acf603da1..0000000000 --- a/docs/selectively-triggering-aktualizr.adoc +++ /dev/null @@ -1 +0,0 @@ -./ota-client-guide/modules/ROOT/pages/aktualizr-runningmodes-finegrained-commandline-control.adoc \ No newline at end of file diff --git a/docs/uptane-generator.adoc b/docs/uptane-generator.adoc deleted file mode 120000 index ddfee75db2..0000000000 --- a/docs/uptane-generator.adoc +++ /dev/null @@ -1 +0,0 @@ -ota-client-guide/modules/ROOT/pages/uptane-generator.adoc \ No newline at end of file diff --git a/fuzz/.gitignore b/fuzz/.gitignore deleted file mode 100644 index 76f462880e..0000000000 --- a/fuzz/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -build -output diff --git a/fuzz/CMakeLists.txt b/fuzz/CMakeLists.txt deleted file mode 100644 index 735c96ff99..0000000000 --- a/fuzz/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -option(ENABLE_FUZZING "Create executables and targets for fuzzing aktualizr with afl." 
Off) -if (ENABLE_FUZZING) - find_program(AFL_FUZZ afl-fuzz) - if ("${AFL_FUZZ}" MATCHES "AFL_FUZZ-NOTFOUND") - message(FATAL_ERROR "Couldn't find afl-fuzz.") - endif() - - add_executable(afl afl.cc) - target_link_libraries(afl aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) - - if (NOT ENABLE_SANITIZERS) - message(FATAL_ERROR "Enable sanitizers with -DENABLE_SANITIZERS=On to do fuzzing.") - endif() - - add_custom_target(fuzz - COMMAND "AFL_SKIP_CPUFREQ=1" "${AFL_FUZZ}" -i "${CMAKE_CURRENT_SOURCE_DIR}/input" -o "${CMAKE_CURRENT_SOURCE_DIR}/output" -m 200 -- "${CMAKE_CURRENT_BINARY_DIR}/afl" "@@" - DEPENDS afl) -endif() diff --git a/fuzz/afl.cc b/fuzz/afl.cc deleted file mode 100644 index 71c3d3689b..0000000000 --- a/fuzz/afl.cc +++ /dev/null @@ -1,17 +0,0 @@ -#include -#include -#include -#include - - -int main(int argc, char **argv){ - (void) argc; - (void) argv; - - std::string input; - std::copy(std::istreambuf_iterator(std::cin), std::istreambuf_iterator(), std::back_inserter(input)); - - (void) input; // TODO do something with this input like parse it or pass it to function - - return 0; -} diff --git a/fuzz/afl.sh b/fuzz/afl.sh deleted file mode 100755 index 62c223181a..0000000000 --- a/fuzz/afl.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -mkdir -p build output -rm -rf build/* output/* -cd build - -cmake ../.. 
\ - -DCMAKE_CXX_COMPILER=afl-g++ \ - -DCMAKE_CC_COMPILER=afl-gcc \ - -DENABLE_FUZZING=On \ - -DENABLE_SANITIZERS=On \ - -DBUILD_SHARED_LIBS=Off \ - -DBUILD_OSTREE=ON - -make -j8 fuzz diff --git a/include/libaktualizr-c.h b/include/libaktualizr-c.h index 728fb6f917..4f684fc77e 100644 --- a/include/libaktualizr-c.h +++ b/include/libaktualizr-c.h @@ -1,28 +1,69 @@ #ifndef AKTUALIZR_LIBAKTUALIZRC_H #define AKTUALIZR_LIBAKTUALIZRC_H +#include // for uint8_t + #ifdef __cplusplus -#include "primary/aktualizr.h" +#include "libaktualizr/aktualizr.h" using Campaign = campaign::Campaign; +using Updates = std::vector; +using Target = Uptane::Target; +using StorageTargetHandle = std::ifstream; extern "C" { #else typedef struct Aktualizr Aktualizr; typedef struct Campaign Campaign; +typedef struct Updates Updates; +typedef struct Target Target; +typedef struct StorageTargetHandle StorageTargetHandle; #endif -Aktualizr *Aktualizr_create(const char *config_path); +Aktualizr *Aktualizr_create_from_cfg(Config *cfg); +Aktualizr *Aktualizr_create_from_path(const char *config_path); int Aktualizr_initialize(Aktualizr *a); int Aktualizr_uptane_cycle(Aktualizr *a); void Aktualizr_destroy(Aktualizr *a); -Campaign *Aktualizr_campaign_check(Aktualizr *a); +int Aktualizr_set_signal_handler(Aktualizr *a, void (*handler)(const char* event_name)); + +Campaign *Aktualizr_campaigns_check(Aktualizr *a); int Aktualizr_campaign_accept(Aktualizr *a, Campaign *c); int Aktualizr_campaign_postpone(Aktualizr *a, Campaign *c); int Aktualizr_campaign_decline(Aktualizr *a, Campaign *c); void Aktualizr_campaign_free(Campaign *c); +Updates *Aktualizr_updates_check(Aktualizr *a); +void Aktualizr_updates_free(Updates *u); + +size_t Aktualizr_get_targets_num(Updates *u); +Target *Aktualizr_get_nth_target(Updates *u, size_t n); +const char *Aktualizr_get_target_name(Target *t); +void Aktualizr_free_target_name(const char *n); + +int Aktualizr_download_target(Aktualizr *a, Target *t); + +int 
Aktualizr_install_target(Aktualizr *a, Target *t); + +int Aktualizr_send_manifest(Aktualizr *a, const char *manifest); +int Aktualizr_send_device_data(Aktualizr *a); + +StorageTargetHandle *Aktualizr_open_stored_target(Aktualizr *a, const Target *t); +size_t Aktualizr_read_stored_target(StorageTargetHandle *handle, uint8_t* buf, size_t size); +int Aktualizr_close_stored_target(StorageTargetHandle *handle); + +typedef enum { + kSuccess = 0, + kAlreadyPaused, + kAlreadyRunning, + kError } +Pause_Status_C; + +Pause_Status_C Aktualizr_pause(Aktualizr *a); +Pause_Status_C Aktualizr_resume(Aktualizr *a); +void Aktualizr_abort(Aktualizr *a); + #ifdef __cplusplus } #endif diff --git a/include/libaktualizr/aktualizr.h b/include/libaktualizr/aktualizr.h new file mode 100644 index 0000000000..5b268212f5 --- /dev/null +++ b/include/libaktualizr/aktualizr.h @@ -0,0 +1,374 @@ +#ifndef AKTUALIZR_H_ +#define AKTUALIZR_H_ + +#include +#include + +#include + +#include "libaktualizr/config.h" +#include "libaktualizr/events.h" +#include "libaktualizr/secondaryinterface.h" + +class SotaUptaneClient; +class INvStorage; + +namespace api { +class CommandQueue; +} + +/** + * This class provides the main APIs necessary for launching and controlling + * libaktualizr. + */ +class Aktualizr { + public: + /** Aktualizr requires a configuration object. Examples can be found in the + * config directory. + * + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (filesystem failure; libsodium initialization failure) + */ + explicit Aktualizr(const Config& config); + + Aktualizr(const Aktualizr&) = delete; + Aktualizr(Aktualizr&&) = delete; + Aktualizr& operator=(const Aktualizr&) = delete; + Aktualizr& operator=(Aktualizr&&) = delete; + ~Aktualizr(); + + /** + * Initialize aktualizr. Any Secondaries should be added before making this + * call. 
This must be called before using any other aktualizr functions + * except AddSecondary. + * + * Provisioning will be attempted once if it hasn't already been completed. + * If that fails (for example because there is no network), then provisioning + * will be automatically re-attempted ahead of any operation that requires it. + * + * @throw Initializer::Error and subclasses + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl, P11, filesystem, credentials archive + * parsing, and certificate parsing failures; + * missing ECU serials or device ID; database + * inconsistency with pending updates; invalid + * OSTree deployment) + * @throw std::system_error (failure to lock a mutex) + */ + void Initialize(); + + /** + * Asynchronously run aktualizr indefinitely until Shutdown is called. + * @return Empty std::future object + * + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl and filesystem failures; database + * inconsistency with pending updates; error + * getting metadata from database or filesystem) + * @throw std::system_error (failure to lock a mutex) + */ + std::future RunForever(); + + /** + * Shuts down currently running `RunForever()` method + * + * @throw std::system_error (failure to lock a mutex) + */ + void Shutdown(); + + /** + * Check for campaigns. + * Campaigns are a concept outside of Uptane, and allow for user approval of + * updates before the contents of the update are known. + * @return std::future object with data about available campaigns. + * + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl failure) + * @throw SotaUptaneClient::ProvisioningFailed (on-line provisioning failed) + */ + std::future CampaignCheck(); + + /** + * Act on campaign: accept, decline or postpone. 
+ * Accepted campaign will be removed from the campaign list but no guarantee + * is made for declined or postponed items. Applications are responsible for + * tracking their state but this method will notify the server for device + * state monitoring purposes. + * @param campaign_id Campaign ID as provided by CampaignCheck. + * @param cmd action to apply on the campaign: accept, decline or postpone + * @return Empty std::future object + * + * @throw std::bad_alloc (memory allocation failure) + * @throw std::system_error (failure to lock a mutex) + */ + std::future CampaignControl(const std::string& campaign_id, campaign::Cmd cmd); + + /** + * Send local device data to the server. + * This includes network status, installed packages, hardware etc. + * @return Empty std::future object + * + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl and filesystem failures) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::ProvisioningFailed (on-line provisioning failed) + */ + std::future SendDeviceData(); + + /** + * Fetch Uptane metadata and check for updates. + * This collects a client manifest, PUTs it to the director, updates the + * Uptane metadata (including root and targets), and then checks the metadata + * for target updates. + * @return Information about available updates. + * + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl and filesystem failures; database + * inconsistency with pending updates) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::ProvisioningFailed (on-line provisioning failed) + */ + std::future CheckUpdates(); + + /** + * Download targets. + * @param updates Vector of targets to download as provided by CheckUpdates. 
+ * @return std::future object with information about download results. + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::NotProvisionedYet (called before provisioning complete) + */ + std::future Download(const std::vector& updates); + + struct InstallationLogEntry { + Uptane::EcuSerial ecu; + std::vector installs; + }; + using InstallationLog = std::vector; + + /** + * Get log of installations. The log is indexed for every ECU and contains + * every change of versions ordered by time. It may contain duplicates in + * case of rollbacks. + * @return installation log + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (failure to load ECU serials) + */ + InstallationLog GetInstallationLog(); + + /** + * Get list of targets currently in storage. This is intended to be used with + * DeleteStoredTarget and targets are not guaranteed to be verified and + * up-to-date with current metadata. + * @return std::vector of target objects + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (error getting targets from database) + */ + std::vector GetStoredTargets(); + + /** + * Delete a stored target from storage. This only affects storage of the + * actual binary data and does not preclude a re-download if a target matches + * current metadata. + * @param target Target object matching the desired target in the storage + * @return true if successful + * + * @throw SQLException + * @throw std::runtime_error (error getting targets from database or filesystem) + */ + void DeleteStoredTarget(const Uptane::Target& target); + + /** + * Get target downloaded in Download call. Returned target is guaranteed to be verified and up-to-date + * according to the Uptane metadata downloaded in CheckUpdates call. 
+ * @param target Target object matching the desired target in the storage. + * @return Handle to the stored binary. nullptr if none is found. + * + * @throw SQLException + * @throw std::runtime_error (error getting targets from database or filesystem) + */ + std::ifstream OpenStoredTarget(const Uptane::Target& target); + + /** + * Install targets. + * @param updates Vector of targets to install as provided by CheckUpdates or + * Download. + * @return std::future object with information about installation results. + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (error getting metadata from database or filesystem) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::NotProvisionedYet (called before provisioning complete) + */ + std::future Install(const std::vector& updates); + + /** + * SetInstallationRawReport allows setting a custom raw report field in the device installation result. + * + * @note An invocation of this method will have effect only after call of Aktualizr::Install and before calling + * Aktualizr::SendManifest member function. + * @param custom_raw_report is intended to replace a default value in the device installation report. + * @return true if the custom raw report was successfully applied to the device installation result. + * If there is no installation report in the storage the function will always return false. + * + * @throw SQLException + */ + bool SetInstallationRawReport(const std::string& custom_raw_report); + + /** + * Send installation report to the backend. + * + * @note The device manifest is also sent as a part of CheckUpdates and + * SendDeviceData calls, as well as after a reboot if it was initiated + * by Aktualizr as a part of an installation process. + * All these manifests will not include the custom data provided in this call. 
+ * + * @param custom Project-specific data to put in the custom field of Uptane manifest + * @return std::future object with manifest update result (true on success). + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl failure; database inconsistency with pending updates) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::ProvisioningFailed (on-line provisioning failed) + */ + std::future SendManifest(const Json::Value& custom = Json::nullValue); + + /** + * Pause the library operations. + * In progress target downloads will be paused and API calls will be deferred. + * + * @return Information about pause results. + * + * @throw std::bad_alloc (memory allocation failure) + * @throw std::system_error (failure to lock a mutex) + */ + result::Pause Pause(); + + /** + * Resume the library operations. + * Target downloads will resume and API calls issued during the pause will + * execute in fifo order. + * + * @return Information about resume results. + * + * @throw std::bad_alloc (memory allocation failure) + * @throw std::system_error (failure to lock a mutex) + */ + result::Pause Resume(); + + /** + * Aborts the currently running command, if it can be aborted, or waits for it + * to finish; then removes all other queued calls. + * This doesn't reset the `Paused` state, i.e. if the queue was previously + * paused, it will remain paused, but with an emptied queue. + * The call is blocking. + * + * @throw std::system_error (failure to lock a mutex) + */ + void Abort(); + + /** + * Synchronously run an Uptane cycle: check for updates, download any new + * targets, install them, and send a manifest back to the server. 
+ * + * @return `false`, if the restart is required to continue, `true` otherwise + * + * @throw SQLException + * @throw boost::filesystem::filesystem_error + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (curl and filesystem failures; database + * inconsistency with pending updates; error + * getting metadata from database or filesystem) + * @throw std::system_error (failure to lock a mutex) + * @throw SotaUptaneClient::ProvisioningFailed (on-line provisioning failed) + */ + bool UptaneCycle(); + + /** + * Add new Secondary to aktualizr. Must be called before Initialize. + * @param secondary An object to perform installation on a Secondary ECU. + * + * @throw std::bad_alloc (memory allocation failure) + * @throw std::runtime_error (multiple Secondaries with the same serial) + */ + void AddSecondary(const std::shared_ptr& secondary); + + /** + * Store some free-form data to be associated with a particular Secondary, to + * be retrieved later through `GetSecondaries` + * + * @throw SQLException + */ + void SetSecondaryData(const Uptane::EcuSerial& ecu, const std::string& data); + + /** + * Returns a list of the registered Secondaries, along with some associated + * metadata + * + * @return vector of SecondaryInfo objects + * + * @throw SQLException + * @throw std::bad_alloc (memory allocation failure) + */ + std::vector GetSecondaries() const; + + /** + * Override the contents of the report sent to the /system_info endpoint. + * If this isn't called (or if hwinfo.empty()) then the output of lshw is + * sent instead + * @param hwinfo The replacement for lshw -json + */ + void SetCustomHardwareInfo(Json::Value hwinfo); + + // The type proxy is needed in doxygen 1.8.16 because of this bug + // https://github.com/doxygen/doxygen/issues/7236 + using SigHandler = std::function)>; + + /** + * Provide a function to receive event notifications. + * @param handler a function that can receive event objects. 
+ * @return a signal connection object, which can be disconnected if desired. + */ + boost::signals2::connection SetSignalHandler(const SigHandler& handler); + + private: + // Make sure this is declared before SotaUptaneClient to prevent Valgrind + // complaints with destructors. + Config config_; + + protected: + Aktualizr(Config config, std::shared_ptr storage_in, const std::shared_ptr& http_in); + + std::shared_ptr uptane_client_; + + private: + struct { + std::mutex m; + std::condition_variable cv; + bool flag = false; + } exit_cond_; + + std::shared_ptr storage_; + std::shared_ptr sig_; + std::unique_ptr api_queue_; +}; + +#endif // AKTUALIZR_H_ diff --git a/include/libaktualizr/campaign.h b/include/libaktualizr/campaign.h new file mode 100644 index 0000000000..0a16acb097 --- /dev/null +++ b/include/libaktualizr/campaign.h @@ -0,0 +1,60 @@ +#ifndef CAMPAIGN_CAMPAIGN_H_ +#define CAMPAIGN_CAMPAIGN_H_ + +#include +#include +#include + +#include "json/json.h" + +class HttpInterface; + +namespace campaign { + +constexpr int64_t kMaxCampaignsMetaSize = 1024 * 1024; + +class CampaignParseError : std::exception { + public: + const char *what() const noexcept override { return "Could not parse Campaign metadata"; } +}; + +//! @cond Doxygen_Suppress +// Annoying bug in doxygen 1.8.13 +// Looks like it's fixed in later versions: https://sourceforge.net/p/doxygen/mailman/message/35481387/ +enum class Cmd { + Accept, + Decline, + Postpone, +}; +///! 
@endcond + +// NOLINTNEXTLINE(clang-diagnostic-unused-function) +static inline Cmd cmdFromName(const std::string &name) { + return std::map{ + {"campaign_accept", Cmd::Accept}, {"campaign_decline", Cmd::Decline}, {"campaign_postpone", Cmd::Postpone}} + .at(name); +} + +// Out of Uptane concept: update campaign for a device +class Campaign { + public: + static std::vector campaignsFromJson(const Json::Value &json); + static void JsonFromCampaigns(const std::vector &in, Json::Value &out); + static std::vector fetchAvailableCampaigns(HttpInterface &http_client, const std::string &tls_server); + + Campaign() = default; + explicit Campaign(const Json::Value &json); + void getJson(Json::Value &out) const; + + std::string id; + std::string name; + int64_t size{0}; + bool autoAccept{false}; + std::string description; + int estInstallationDuration{0}; + int estPreparationDuration{0}; +}; + +} // namespace campaign + +#endif diff --git a/include/libaktualizr/config.h b/include/libaktualizr/config.h new file mode 100644 index 0000000000..da5110dce7 --- /dev/null +++ b/include/libaktualizr/config.h @@ -0,0 +1,244 @@ +#ifndef CONFIG_H_ +#define CONFIG_H_ + +#include +#include +#include +#include + +#include +#include +#include + +#include "libaktualizr/types.h" + +// Try to keep the order of config options the same as in Config::writeToStream() +// and Config::updateFromPropertyTree() in config.cc. 
+ +struct LoggerConfig { + int loglevel{2}; + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +// declare p11 types as incomplete so that the header can be used without libp11 +struct PKCS11_ctx_st; +struct PKCS11_slot_st; + +struct P11Config { + boost::filesystem::path module; + std::string pass; + std::string label; + std::string uptane_key_id; + std::string tls_cacert_id; + std::string tls_pkey_id; + std::string tls_clientcert_id; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +struct TlsConfig { + std::string server; + boost::filesystem::path server_url_path; + CryptoSource ca_source{CryptoSource::kFile}; + CryptoSource pkey_source{CryptoSource::kFile}; + CryptoSource cert_source{CryptoSource::kFile}; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +struct ProvisionConfig { + std::string server; + std::string p12_password; + std::string expiry_days{"36000"}; + boost::filesystem::path provision_path; + ProvisionMode mode{ProvisionMode::kDefault}; + std::string device_id; + std::string primary_ecu_serial; + std::string primary_ecu_hardware_id; + std::string ecu_registration_endpoint; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +struct UptaneConfig { + uint64_t polling_sec{300U}; + std::string director_server; + std::string repo_server; + CryptoSource key_source{CryptoSource::kFile}; + KeyType key_type{KeyType::kRSA2048}; + bool force_install_completion{false}; + boost::filesystem::path secondary_config_file; + uint64_t secondary_preinstall_wait_sec{600U}; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +// TODO: move these to their corresponding 
headers +#define PACKAGE_MANAGER_NONE "none" +#define PACKAGE_MANAGER_OSTREE "ostree" + +#ifdef BUILD_OSTREE +#define PACKAGE_MANAGER_DEFAULT PACKAGE_MANAGER_OSTREE +#else +#define PACKAGE_MANAGER_DEFAULT PACKAGE_MANAGER_NONE +#endif + +struct PackageConfig { + std::string type{PACKAGE_MANAGER_DEFAULT}; + + // OSTree options + std::string os; + boost::filesystem::path sysroot; + std::string ostree_server; + boost::filesystem::path images_path{"/var/sota/images"}; + boost::filesystem::path packages_file{"/usr/package.manifest"}; + + // Options for simulation + bool fake_need_reboot{false}; + BootedType booted{BootedType::kBooted}; + + // for specialized configuration + std::map extra; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +struct StorageConfig { + StorageType type{StorageType::kSqlite}; + boost::filesystem::path path{"/var/sota"}; + + // FS storage + utils::BasedPath uptane_metadata_path{"metadata"}; + utils::BasedPath uptane_private_key_path{"ecukey.der"}; + utils::BasedPath uptane_public_key_path{"ecukey.pub"}; + utils::BasedPath tls_cacert_path{"root.crt"}; + utils::BasedPath tls_pkey_path{"pkey.pem"}; + utils::BasedPath tls_clientcert_path{"client.pem"}; + + // SQLite storage + utils::BasedPath sqldb_path{"sql.db"}; // based on `/var/sota` + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +struct ImportConfig { + boost::filesystem::path base_path{"/var/sota/import"}; + utils::BasedPath uptane_private_key_path{""}; + utils::BasedPath uptane_public_key_path{""}; + utils::BasedPath tls_cacert_path{""}; + utils::BasedPath tls_pkey_path{""}; + utils::BasedPath tls_clientcert_path{""}; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +/** + * @brief The TelemetryConfig struct + * Report device network information: 
IP address, hostname, MAC address. + */ +struct TelemetryConfig { + bool report_network{true}; + bool report_config{true}; + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +enum class RollbackMode { kBootloaderNone = 0, kUbootGeneric, kUbootMasked, kFioVB }; +std::ostream& operator<<(std::ostream& os, RollbackMode mode); + +struct BootloaderConfig { + RollbackMode rollback_mode{RollbackMode::kBootloaderNone}; + boost::filesystem::path reboot_sentinel_dir{"/var/run/aktualizr-session"}; + boost::filesystem::path reboot_sentinel_name{"need_reboot"}; + std::string reboot_command{"/sbin/reboot"}; + + void updateFromPropertyTree(const boost::property_tree::ptree& pt); + void writeToStream(std::ostream& out_stream) const; +}; + +// bundle some parts of the main config together +// Should be derived by calling Config::keymanagerConfig() +struct KeyManagerConfig { + KeyManagerConfig() = delete; // only allow construction by initializer list + P11Config p11; + CryptoSource tls_ca_source; + CryptoSource tls_pkey_source; + CryptoSource tls_cert_source; + KeyType uptane_key_type; + CryptoSource uptane_key_source; +}; + +/** + * @brief The BaseConfig class + */ +class BaseConfig { + public: + BaseConfig() = default; + virtual ~BaseConfig() = default; + void updateFromToml(const boost::filesystem::path& filename); + virtual void updateFromPropertyTree(const boost::property_tree::ptree& pt) = 0; + + protected: + BaseConfig(const BaseConfig&) = default; + BaseConfig(BaseConfig&&) = default; + BaseConfig& operator=(const BaseConfig&) = default; + BaseConfig& operator=(BaseConfig&&) = default; + + void updateFromDirs(const std::vector& configs); + + static void checkDirs(const std::vector& configs); + + std::vector config_dirs_ = {"/usr/lib/sota/conf.d", "/var/sota/sota.toml", + "/etc/sota/conf.d/"}; +}; + +/** + * Configuration object for an aktualizr instance running on a Primary ECU. 
+ * + * This class is a parent to a series of smaller configuration objects for + * specific subsystems. Note that most other aktualizr-related tools have their + * own parent configuration objects with a reduced set of members. + */ +class Config : public BaseConfig { + public: + Config(); + explicit Config(const boost::program_options::variables_map& cmd); + explicit Config(const boost::filesystem::path& filename); + explicit Config(const std::vector& config_dirs); + + KeyManagerConfig keymanagerConfig() const; + + void updateFromTomlString(const std::string& contents); + void postUpdateValues(); + void writeToStream(std::ostream& sink) const; + + // Config data structures. Keep logger first so that it is taken into account + // while processing the others. + LoggerConfig logger; + P11Config p11; + TlsConfig tls; + ProvisionConfig provision; + UptaneConfig uptane; + PackageConfig pacman; + StorageConfig storage; + ImportConfig import; + TelemetryConfig telemetry; + BootloaderConfig bootloader; + + private: + void updateFromPropertyTree(const boost::property_tree::ptree& pt) override; + void updateFromCommandLine(const boost::program_options::variables_map& cmd); + bool loglevel_from_cmdline{false}; +}; + +std::ostream& operator<<(std::ostream& os, const Config& cfg); + +#endif // CONFIG_H_ diff --git a/src/libaktualizr/primary/events.h b/include/libaktualizr/events.h similarity index 94% rename from src/libaktualizr/primary/events.h rename to include/libaktualizr/events.h index fef0893f24..4f59f74a7e 100644 --- a/src/libaktualizr/primary/events.h +++ b/include/libaktualizr/events.h @@ -7,10 +7,7 @@ #include -#include "primary/results.h" -#include "uptane/fetcher.h" -#include "uptane/tuf.h" -#include "utilities/types.h" +#include "libaktualizr/results.h" /** * Aktualizr status events. 
@@ -23,8 +20,12 @@ namespace event { class BaseEvent { public: BaseEvent() = default; - BaseEvent(std::string variant_in) : variant(std::move(variant_in)) {} + explicit BaseEvent(std::string variant_in) : variant(std::move(variant_in)) {} virtual ~BaseEvent() = default; + BaseEvent(const BaseEvent&) = default; + BaseEvent(BaseEvent&&) = default; + BaseEvent& operator=(const BaseEvent&) = default; + BaseEvent& operator=(BaseEvent&&) = default; template bool isTypeOf() { @@ -76,7 +77,6 @@ class DownloadProgressReport : public BaseEvent { return (report.progress == DownloadProgressReport::ProgressCompletedValue); } - public: DownloadProgressReport(Uptane::Target target_in, std::string description_in, unsigned int progress_in) : target{std::move(target_in)}, description{std::move(description_in)}, progress{progress_in} { variant = TypeName; diff --git a/include/libaktualizr/packagemanagerfactory.h b/include/libaktualizr/packagemanagerfactory.h new file mode 100644 index 0000000000..b6c70cf84a --- /dev/null +++ b/include/libaktualizr/packagemanagerfactory.h @@ -0,0 +1,38 @@ +#ifndef PACKAGEMANAGERFACTORY_H_ +#define PACKAGEMANAGERFACTORY_H_ + +#include "libaktualizr/config.h" +#include "libaktualizr/packagemanagerinterface.h" + +class INvStorage; + +using PackageManagerBuilder = + std::function&, const std::shared_ptr&)>; + +class PackageManagerFactory { + public: + static bool registerPackageManager(const char* name, PackageManagerBuilder builder); + static std::shared_ptr makePackageManager(const PackageConfig& pconfig, + const BootloaderConfig& bconfig, + const std::shared_ptr& storage, + const std::shared_ptr& http); +}; + +// macro to auto-register a package manager +// note that static library users will have to call `registerPackageManager` manually + +#define AUTO_REGISTER_PACKAGE_MANAGER(name, clsname) \ + class clsname##_PkgMRegister_ { \ + public: \ + clsname##_PkgMRegister_() { \ + PackageManagerFactory::registerPackageManager( \ + name, [](const 
PackageConfig& pconfig, const BootloaderConfig& bconfig, \ + const std::shared_ptr& storage, const std::shared_ptr& http) { \ + return new clsname(pconfig, bconfig, storage, http); \ + }); \ + } \ + }; \ + static clsname##_PkgMRegister_ clsname##_register_ + +#endif // PACKAGEMANAGERFACTORY_H_ diff --git a/include/libaktualizr/packagemanagerinterface.h b/include/libaktualizr/packagemanagerinterface.h new file mode 100644 index 0000000000..1e3bcf9456 --- /dev/null +++ b/include/libaktualizr/packagemanagerinterface.h @@ -0,0 +1,79 @@ +#ifndef PACKAGEMANAGERINTERFACE_H_ +#define PACKAGEMANAGERINTERFACE_H_ + +#include +#include +#include + +#include "libaktualizr/config.h" + +class Bootloader; +class HttpInterface; +class KeyManager; +class INvStorage; + +namespace api { +class FlowControlToken; +} + +namespace Uptane { +class Fetcher; +} + +using FetcherProgressCb = std::function; + +/** + * Status of downloaded target. + */ +enum class TargetStatus { + /* Target has been downloaded and verified. */ + kGood = 0, + /* Target was not found. */ + kNotFound, + /* Target was found, but is incomplete. */ + kIncomplete, + /* Target was found, but is larger than expected. */ + kOversized, + /* Target was found, but hash did not match the metadata. 
*/ + kHashMismatch, + /* Target was found and has valid metadata but the content is not suitable for the packagemanager */ + kInvalid, +}; + +class PackageManagerInterface { + public: + PackageManagerInterface(PackageConfig pconfig, const BootloaderConfig& bconfig, std::shared_ptr storage, + std::shared_ptr http) + : config(std::move(pconfig)), storage_(std::move(storage)), http_(std::move(http)) { + (void)bconfig; + } + virtual ~PackageManagerInterface() = default; + PackageManagerInterface(const PackageManagerInterface&) = delete; + PackageManagerInterface(PackageManagerInterface&&) = delete; + PackageManagerInterface& operator=(const PackageManagerInterface&) = delete; + PackageManagerInterface& operator=(PackageManagerInterface&&) = delete; + virtual std::string name() const = 0; + virtual Json::Value getInstalledPackages() const = 0; + virtual Uptane::Target getCurrent() const = 0; + virtual data::InstallationResult install(const Uptane::Target& target) const = 0; + virtual void completeInstall() const { throw std::runtime_error("Unimplemented"); } + virtual data::InstallationResult finalizeInstall(const Uptane::Target& target) = 0; + virtual void updateNotify() {} + virtual void installNotify(const Uptane::Target& target) { (void)target; } + virtual bool fetchTarget(const Uptane::Target& target, Uptane::Fetcher& fetcher, const KeyManager& keys, + const FetcherProgressCb& progress_cb, const api::FlowControlToken* token); + virtual TargetStatus verifyTarget(const Uptane::Target& target) const; + virtual bool checkAvailableDiskSpace(uint64_t required_bytes) const; + virtual boost::optional> checkTargetFile(const Uptane::Target& target) const; + virtual std::ofstream createTargetFile(const Uptane::Target& target); + virtual std::ofstream appendTargetFile(const Uptane::Target& target); + virtual std::ifstream openTargetFile(const Uptane::Target& target) const; + virtual void removeTargetFile(const Uptane::Target& target); + virtual std::vector getTargetFiles(); + 
+ protected: + PackageConfig config; + std::shared_ptr storage_; + std::shared_ptr http_; +}; +#endif // PACKAGEMANAGERINTERFACE_H_ diff --git a/src/libaktualizr/primary/results.h b/include/libaktualizr/results.h similarity index 89% rename from src/libaktualizr/primary/results.h rename to include/libaktualizr/results.h index f78ef7c29f..8eb3316c63 100644 --- a/src/libaktualizr/primary/results.h +++ b/include/libaktualizr/results.h @@ -5,9 +5,8 @@ #include #include -#include "campaign/campaign.h" -#include "uptane/fetcher.h" -#include "uptane/tuf.h" +#include "libaktualizr/campaign.h" +#include "libaktualizr/types.h" /** Results of libaktualizr API calls. */ namespace result { @@ -32,6 +31,8 @@ enum class UpdateStatus { kError, }; +std::ostream& operator<<(std::ostream& os, UpdateStatus update_status); + /** * Container for information about available updates. */ @@ -39,11 +40,11 @@ class UpdateCheck { public: UpdateCheck() = default; UpdateCheck(std::vector updates_in, unsigned int ecus_count_in, UpdateStatus status_in, - const Json::Value &targets_meta_in, std::string message_in) + Json::Value targets_meta_in, std::string message_in) : updates(std::move(updates_in)), ecus_count(ecus_count_in), status(status_in), - targets_meta(targets_meta_in), + targets_meta(std::move(targets_meta_in)), message(std::move(message_in)) {} std::vector updates; unsigned int ecus_count{0}; @@ -69,6 +70,7 @@ enum class PauseStatus { class Pause { public: Pause() = default; + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) Pause(PauseStatus status_in) : status(status_in) {} PauseStatus status{PauseStatus::kSuccess}; @@ -88,6 +90,8 @@ enum class DownloadStatus { kError, }; +std::ostream& operator<<(std::ostream& os, DownloadStatus stat); + /** * Container for information about downloading an update. 
*/ diff --git a/include/libaktualizr/secondary_provider.h b/include/libaktualizr/secondary_provider.h new file mode 100644 index 0000000000..35fdc3a6e3 --- /dev/null +++ b/include/libaktualizr/secondary_provider.h @@ -0,0 +1,34 @@ +#ifndef UPTANE_SECONDARY_PROVIDER_H +#define UPTANE_SECONDARY_PROVIDER_H + +#include + +#include "libaktualizr/config.h" +#include "libaktualizr/packagemanagerinterface.h" +#include "libaktualizr/types.h" + +class INvStorage; + +class SecondaryProviderBuilder; + +class SecondaryProvider { + public: + friend class SecondaryProviderBuilder; + + bool getMetadata(Uptane::MetaBundle* meta_bundle, const Uptane::Target& target) const; + bool getDirectorMetadata(Uptane::MetaBundle* meta_bundle) const; + bool getImageRepoMetadata(Uptane::MetaBundle* meta_bundle, const Uptane::Target& target) const; + std::string getTreehubCredentials() const; + std::ifstream getTargetFileHandle(const Uptane::Target& target) const; + + private: + SecondaryProvider(Config& config_in, std::shared_ptr storage_in, + std::shared_ptr package_manager_in) + : config_(config_in), storage_(std::move(storage_in)), package_manager_(std::move(package_manager_in)) {} + + Config& config_; + const std::shared_ptr storage_; + const std::shared_ptr package_manager_; +}; + +#endif // UPTANE_SECONDARY_PROVIDER_H diff --git a/include/libaktualizr/secondaryinterface.h b/include/libaktualizr/secondaryinterface.h new file mode 100644 index 0000000000..80654a4fc2 --- /dev/null +++ b/include/libaktualizr/secondaryinterface.h @@ -0,0 +1,40 @@ +#ifndef UPTANE_SECONDARYINTERFACE_H +#define UPTANE_SECONDARYINTERFACE_H + +#include + +#include "libaktualizr/secondary_provider.h" +#include "libaktualizr/types.h" + +class SecondaryInterface { + public: + SecondaryInterface() = default; + virtual ~SecondaryInterface() = default; + + using Ptr = std::shared_ptr; + + virtual void init(std::shared_ptr secondary_provider_in) = 0; + virtual std::string Type() const = 0; + virtual Uptane::EcuSerial 
getSerial() const = 0; + virtual Uptane::HardwareIdentifier getHwId() const = 0; + virtual PublicKey getPublicKey() const = 0; + + virtual Uptane::Manifest getManifest() const = 0; + virtual data::InstallationResult putMetadata(const Uptane::Target& target) = 0; + virtual bool ping() const = 0; + + // return 0 during initialization and -1 for error. + virtual int32_t getRootVersion(bool director) const = 0; + virtual data::InstallationResult putRoot(const std::string& root, bool director) = 0; + + virtual data::InstallationResult sendFirmware(const Uptane::Target& target) = 0; + virtual data::InstallationResult install(const Uptane::Target& target) = 0; + + protected: + SecondaryInterface(const SecondaryInterface&) = default; + SecondaryInterface(SecondaryInterface&&) = default; + SecondaryInterface& operator=(const SecondaryInterface&) = default; + SecondaryInterface& operator=(SecondaryInterface&&) = default; +}; + +#endif // UPTANE_SECONDARYINTERFACE_H diff --git a/include/libaktualizr/types.h b/include/libaktualizr/types.h new file mode 100644 index 0000000000..c7df84c89b --- /dev/null +++ b/include/libaktualizr/types.h @@ -0,0 +1,520 @@ +#ifndef TYPES_H_ +#define TYPES_H_ +/** \file */ + +#include +#include +#include + +#include + +#include "json/json.h" + +// kSharedCredReuse is intended solely for testing. It should not be used in +// production. 
+enum class ProvisionMode { kSharedCred = 0, kDeviceCred, kSharedCredReuse, kDefault }; +std::ostream &operator<<(std::ostream &os, ProvisionMode mode); + +enum class StorageType { kFileSystem = 0, kSqlite }; +std::ostream &operator<<(std::ostream &os, StorageType stype); + +enum class BootedType { kBooted = 0, kStaged }; +std::ostream &operator<<(std::ostream &os, BootedType btype); + +enum class VerificationType { + kFull = 0, + kTuf + // TODO: kPartial +}; +std::ostream &operator<<(std::ostream &os, VerificationType vtype); + +namespace utils { +/** + * @brief The BasedPath class + * Can represent an absolute or relative path, only readable through the BasePath::get() method. + * + * The intent is to avoid unintentional use of the "naked" relative path by + * mandating a base directory for each instantiation. + */ +class BasedPath { + public: + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) + BasedPath(boost::filesystem::path p) : p_(std::move(p)) {} + boost::filesystem::path get(const boost::filesystem::path &base) const; + bool empty() const { return p_.empty(); } + bool operator==(const BasedPath &b) const { return p_ == b.p_; } + bool operator!=(const BasedPath &b) const { return !(*this == b); } + + private: + boost::filesystem::path p_; +}; + +} // namespace utils + +// Keep these in sync with AKIpUptaneKeyType ASN.1 definitions. 
+enum class KeyType { + kED25519 = 0, + kFirstKnown = kED25519, + kRSA2048, + kRSA3072, + kRSA4096, + kLastKnown = kRSA4096, + kUnknown = 0xff +}; + +inline std::ostream &operator<<(std::ostream &os, const KeyType kt) { + std::string kt_str; + switch (kt) { + case KeyType::kRSA2048: + kt_str = "RSA2048"; + break; + case KeyType::kRSA3072: + kt_str = "RSA3072"; + break; + case KeyType::kRSA4096: + kt_str = "RSA4096"; + break; + case KeyType::kED25519: + kt_str = "ED25519"; + break; + default: + kt_str = "unknown"; + break; + } + os << '"' << kt_str << '"'; + return os; +} + +inline std::istream &operator>>(std::istream &is, KeyType &kt) { + std::string kt_str; + + is >> kt_str; + std::transform(kt_str.begin(), kt_str.end(), kt_str.begin(), ::toupper); + kt_str.erase(std::remove(kt_str.begin(), kt_str.end(), '"'), kt_str.end()); + + if (kt_str == "RSA2048") { + kt = KeyType::kRSA2048; + } else if (kt_str == "RSA3072") { + kt = KeyType::kRSA3072; + } else if (kt_str == "RSA4096") { + kt = KeyType::kRSA4096; + } else if (kt_str == "ED25519") { + kt = KeyType::kED25519; + } else { + kt = KeyType::kUnknown; + } + return is; +} + +enum class CryptoSource { kFile = 0, kPkcs11 }; + +inline std::ostream &operator<<(std::ostream &os, CryptoSource cs) { + std::string cs_str; + switch (cs) { + case CryptoSource::kFile: + cs_str = "file"; + break; + case CryptoSource::kPkcs11: + cs_str = "pkcs11"; + break; + default: + cs_str = "unknown"; + break; + } + os << '"' << cs_str << '"'; + return os; +} + +class PublicKey { + public: + PublicKey() = default; + explicit PublicKey(const boost::filesystem::path &path); + + explicit PublicKey(const Json::Value &uptane_json); + + PublicKey(const std::string &value, KeyType type); + + std::string Value() const { return value_; } + + KeyType Type() const { return type_; } + /** + * Verify a signature using this public key + */ + bool VerifySignature(const std::string &signature, const std::string &message) const; + /** + * Uptane Json 
representation of this public key. Used in root.json + * and during provisioning. + */ + Json::Value ToUptane() const; + + std::string KeyId() const; + bool operator==(const PublicKey &rhs) const; + + bool operator!=(const PublicKey &rhs) const { return !(*this == rhs); } + + private: + // std::string can be implicitly converted to a Json::Value. Make sure that + // the Json::Value constructor is not called accidentally. + PublicKey(std::string); // NOLINT(google-explicit-constructor, hicpp-explicit-conversions) + std::string value_; + KeyType type_{KeyType::kUnknown}; +}; + +/** + * @brief The Hash class The hash of a file or Uptane metadata. + * File hashes/checksums in Uptane include the length of the object, + * in order to defeat infinite download attacks. + */ +class Hash { + public: + // order corresponds algorithm priority + enum class Type { kSha256, kSha512, kUnknownAlgorithm }; + + static Hash generate(Type type, const std::string &data); + Hash(const std::string &type, const std::string &hash); + Hash(Type type, const std::string &hash); + + bool HaveAlgorithm() const { return type_ != Type::kUnknownAlgorithm; } + bool operator==(const Hash &other) const; + bool operator!=(const Hash &other) const { return !operator==(other); } + static std::string TypeString(Type type); + std::string TypeString() const; + Type type() const; + std::string HashString() const { return hash_; } + friend std::ostream &operator<<(std::ostream &os, const Hash &h); + + static std::string encodeVector(const std::vector &hashes); + static std::vector decodeVector(std::string hashes_str); + + private: + Type type_; + std::string hash_; +}; + +std::ostream &operator<<(std::ostream &os, const Hash &h); + +// timestamp, compatible with tuf +class TimeStamp { + public: + static TimeStamp Now(); + static struct tm CurrentTime(); + /** An invalid TimeStamp */ + TimeStamp() = default; + explicit TimeStamp(std::string rfc3339); + explicit TimeStamp(struct tm time); + bool 
IsExpiredAt(const TimeStamp &now) const; + bool IsValid() const; + std::string ToString() const { return time_; } + bool operator<(const TimeStamp &other) const; + bool operator>(const TimeStamp &other) const; + friend std::ostream &operator<<(std::ostream &os, const TimeStamp &t); + bool operator==(const TimeStamp &rhs) const { return time_ == rhs.time_; } + + class InvalidTimeStamp : public std::domain_error { + public: + InvalidTimeStamp() : std::domain_error("invalid timestamp"){}; + ~InvalidTimeStamp() noexcept override = default; + InvalidTimeStamp(const InvalidTimeStamp &) noexcept = default; + InvalidTimeStamp(InvalidTimeStamp &&) noexcept = default; + InvalidTimeStamp &operator=(const InvalidTimeStamp &) noexcept = default; + InvalidTimeStamp &operator=(InvalidTimeStamp &&) noexcept = default; + }; + + private: + std::string time_; +}; + +std::ostream &operator<<(std::ostream &os, const TimeStamp &t); + +/// General data structures. +namespace data { + +struct ResultCode { + // Keep these in sync with AKInstallationResultCode ASN.1 definitions. 
+ enum class Numeric { + kOk = 0, + /// Operation has already been processed + kAlreadyProcessed = 1, + /// Metadata verification failed + kVerificationFailed = 3, + /// Package installation failed + kInstallFailed = 4, + /// Package download failed + kDownloadFailed = 5, + /// SWM Internal integrity error + kInternalError = 18, + /// Other error + kGeneralError = 19, + // Install needs to be finalized (e.g: reboot) + kNeedCompletion = 21, + // Customer specific + kCustomError = 22, + // Unknown + kUnknown = -1, + }; + + // note: intentionally *not* explicit, to make the common case easier + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) + ResultCode(ResultCode::Numeric in_num_code) : num_code(in_num_code) {} + ResultCode(ResultCode::Numeric in_num_code, std::string text_code_in) + : num_code(in_num_code), text_code(std::move(text_code_in)) {} + + bool operator==(const ResultCode &rhs) const { return num_code == rhs.num_code && ToString() == rhs.ToString(); } + bool operator!=(const ResultCode &rhs) const { return !(*this == rhs); } + friend std::ostream &operator<<(std::ostream &os, const ResultCode &result_code); + + Numeric num_code; + std::string text_code; + + // Allows to have a numeric code with a default representation, but also with + // any string representation. This is specifically useful for campaign success + // analysis, because the device installation report concatenates the + // individual ECU ResultCodes. 
+ std::string ToString() const { + if (!text_code.empty()) { + return text_code; + } + + return std::string(string_repr.at(num_code)); + } + + // non-lossy reprensation for serialization + std::string toRepr() const; + static ResultCode fromRepr(const std::string &repr); + + private: + static const std::map string_repr; +}; + +std::ostream &operator<<(std::ostream &os, const ResultCode &result_code); + +struct InstallationResult { + InstallationResult() = default; + InstallationResult(ResultCode result_code_in, std::string description_in) + : success(result_code_in.num_code == ResultCode::Numeric::kOk || + result_code_in.num_code == ResultCode::Numeric::kAlreadyProcessed), + result_code(std::move(result_code_in)), + description(std::move(description_in)) {} + InstallationResult(bool success_in, ResultCode result_code_in, std::string description_in) + : success(success_in), result_code(std::move(result_code_in)), description(std::move(description_in)) {} + + Json::Value toJson() const; + bool isSuccess() const { return success; }; + bool needCompletion() const { return result_code == ResultCode::Numeric::kNeedCompletion; } + + bool success{true}; + ResultCode result_code{ResultCode::Numeric::kOk}; + std::string description; +}; + +} // namespace data + +namespace Uptane { + +class RepositoryType; +class Role; +struct MetaPairHash; + +using MetaBundle = std::unordered_map, std::string, MetaPairHash>; + +struct InstalledImageInfo { + InstalledImageInfo() = default; + InstalledImageInfo(std::string name_in, uint64_t len_in, std::string hash_in) + : name(std::move(name_in)), len(len_in), hash(std::move(hash_in)) {} + std::string name; + uint64_t len{0}; + std::string hash; +}; + +class HardwareIdentifier { + public: + // https://github.com/uptane/ota-tuf/blob/master/libtuf/src/main/scala/com/advancedtelematic/libtuf/data/TufDataType.scala#L23 + static const int kMinLength = 0; + static const int kMaxLength = 200; + + static HardwareIdentifier Unknown() { return 
HardwareIdentifier("Unknown"); } + explicit HardwareIdentifier(const std::string &hwid) : hwid_(hwid) { + /* if (hwid.length() < kMinLength) { + throw std::out_of_range("Hardware Identifier too short"); + } */ + if (kMaxLength < hwid.length()) { + throw std::out_of_range("Hardware Identifier too long"); + } + } + + std::string ToString() const { return hwid_; } + + bool operator==(const HardwareIdentifier &rhs) const { return hwid_ == rhs.hwid_; } + bool operator!=(const HardwareIdentifier &rhs) const { return !(*this == rhs); } + + bool operator<(const HardwareIdentifier &rhs) const { return hwid_ < rhs.hwid_; } + friend std::ostream &operator<<(std::ostream &os, const HardwareIdentifier &hwid); + friend struct std::hash; + + private: + std::string hwid_; +}; + +std::ostream &operator<<(std::ostream &os, const HardwareIdentifier &hwid); + +class EcuSerial { + public: + // https://github.com/advancedtelematic/ota-tuf/blob/master/libtuf/src/main/scala/com/advancedtelematic/libtuf/data/TufDataType.scala + static const int kMinLength = 1; + static const int kMaxLength = 64; + + static EcuSerial Unknown() { return EcuSerial("Unknown"); } + explicit EcuSerial(const std::string &ecu_serial) : ecu_serial_(ecu_serial) { + if (ecu_serial.length() < kMinLength) { + throw std::out_of_range("ECU serial identifier is too short"); + } + if (kMaxLength < ecu_serial.length()) { + throw std::out_of_range("ECU serial identifier is too long"); + } + } + + std::string ToString() const { return ecu_serial_; } + + bool operator==(const EcuSerial &rhs) const { return ecu_serial_ == rhs.ecu_serial_; } + bool operator!=(const EcuSerial &rhs) const { return !(*this == rhs); } + + bool operator<(const EcuSerial &rhs) const { return ecu_serial_ < rhs.ecu_serial_; } + friend std::ostream &operator<<(std::ostream &os, const EcuSerial &ecu_serial); + friend struct std::hash; + + private: + std::string ecu_serial_; +}; + +std::ostream &operator<<(std::ostream &os, const EcuSerial &ecu_serial); + 
+using EcuMap = std::map; + +class Target { + public: + // From Uptane metadata + Target(std::string filename, const Json::Value &content); + // Internal use only. Only used for reading installed_versions list and by + // various tests. + Target(std::string filename, EcuMap ecus, std::vector hashes, uint64_t length, std::string correlation_id = "", + std::string type = "UNKNOWN"); + + static Target Unknown(); + + const EcuMap &ecus() const { return ecus_; } + std::string filename() const { return filename_; } + std::string sha256Hash() const; + std::string sha512Hash() const; + const std::vector &hashes() const { return hashes_; } + const std::vector &hardwareIds() const { return hwids_; } + std::string custom_version() const; + Json::Value custom_data() const { return custom_; } + void updateCustom(const Json::Value &custom); + std::string correlation_id() const { return correlation_id_; } + void setCorrelationId(std::string correlation_id) { correlation_id_ = std::move(correlation_id); } + uint64_t length() const { return length_; } + bool IsValid() const { return valid; } + std::string uri() const { return uri_; } + void setUri(std::string uri) { uri_ = std::move(uri); } + bool MatchHash(const Hash &hash) const; + + void InsertEcu(const std::pair &pair) { ecus_.insert(pair); } + + bool IsForEcu(const EcuSerial &ecuIdentifier) const { + return (std::find_if(ecus_.cbegin(), ecus_.cend(), + [&ecuIdentifier](const std::pair &pair) { + return pair.first == ecuIdentifier; + }) != ecus_.cend()); + } + + /** + * Is this an OSTree target? + * OSTree targets need special treatment because the hash doesn't represent + * the contents of the update itself, instead it is the hash (name) of the + * root commit object. + */ + bool IsOstree() const; + std::string type() const { return type_; } + + // Comparison is usually not meaningful. Use MatchTarget instead. 
+ bool operator==(const Target &t2) = delete; + bool MatchTarget(const Target &t2) const; + Json::Value toDebugJson() const; + friend std::ostream &operator<<(std::ostream &os, const Target &t); + InstalledImageInfo getTargetImageInfo() const { return {filename(), length(), sha256Hash()}; } + + private: + bool valid{true}; + std::string filename_; + std::string type_; + EcuMap ecus_; // Director only + std::vector hashes_; + std::vector hwids_; // Image repo only + Json::Value custom_; + uint64_t length_{0}; + std::string correlation_id_; + std::string uri_; + + std::string hashString(Hash::Type type) const; +}; + +std::ostream &operator<<(std::ostream &os, const Target &t); + +class Manifest : public Json::Value { + public: + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) + Manifest(const Json::Value &value = Json::Value()) : Json::Value(value) {} + + std::string filepath() const; + Hash installedImageHash() const; + std::string signature() const; + std::string signedBody() const; + bool verifySignature(const PublicKey &pub_key) const; +}; + +// NOLINTNEXTLINE(clang-diagnostic-unused-function) +static inline VerificationType VerificationTypeFromString(std::string vt_str) { + std::transform(vt_str.begin(), vt_str.end(), vt_str.begin(), ::tolower); + if (vt_str == "tuf") { + return VerificationType::kTuf; + } else { + return VerificationType::kFull; + } +} + +// NOLINTNEXTLINE(clang-diagnostic-unused-function) +static inline std::string VerificationTypeToString(const VerificationType vtype) { + std::string type_s; + switch (vtype) { + case VerificationType::kFull: + default: + type_s = "Full"; + break; + case VerificationType::kTuf: + type_s = "Tuf"; + break; + } + return type_s; +} + +} // namespace Uptane + +struct SecondaryInfo { + SecondaryInfo() : serial(Uptane::EcuSerial::Unknown()), hw_id(Uptane::HardwareIdentifier::Unknown()) {} + SecondaryInfo(Uptane::EcuSerial serial_in, Uptane::HardwareIdentifier hw_id_in, std::string type_in, 
+ PublicKey pub_key_in, std::string extra_in) + : serial(std::move(serial_in)), + hw_id(std::move(hw_id_in)), + type(std::move(type_in)), + pub_key(std::move(pub_key_in)), + extra(std::move(extra_in)) {} + + Uptane::EcuSerial serial; + Uptane::HardwareIdentifier hw_id; + std::string type; + PublicKey pub_key; + + std::string extra; +}; + +#endif diff --git a/scripts/amalgamate-jsoncpp.sh b/scripts/amalgamate-jsoncpp.sh new file mode 100755 index 0000000000..1f8b1194e9 --- /dev/null +++ b/scripts/amalgamate-jsoncpp.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -euo pipefail + +JSONCPP_SRCDIR=$1 +DSTDIR=$2 + +TMPDIR=$(mktemp -d) +trap 'rm -rf $TMPDIR' EXIT + +python3 "$JSONCPP_SRCDIR/amalgamate.py" -t "$JSONCPP_SRCDIR" -s "$TMPDIR/jsoncpp.cc" > /dev/null + +if [ -d "$DSTDIR" ]; then + if diff -rq "$DSTDIR" "$TMPDIR" > /dev/null; then + # output already matches amalgated source + exit 0 + fi + rm -rf "$DSTDIR" +fi + +cp -r "$TMPDIR" "$DSTDIR" diff --git a/scripts/build_and_push_dockerised_aktualizr_for_e2e.sh b/scripts/build_and_push_dockerised_aktualizr_for_e2e.sh deleted file mode 100755 index 0de9b8996c..0000000000 --- a/scripts/build_and_push_dockerised_aktualizr_for_e2e.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -cd .. - -docker build -f docker/Dockerfile.ubuntu.xenial -t advancedtelematic/aktualizr-base . -docker build -f docker/Dockerfile.e2e -t advancedtelematic/aktualizr . 
-docker push advancedtelematic/aktualizr diff --git a/scripts/build_ubuntu.sh b/scripts/build_ubuntu.sh index 0abc66f443..03c8a56eb5 100755 --- a/scripts/build_ubuntu.sh +++ b/scripts/build_ubuntu.sh @@ -17,4 +17,4 @@ mkdir -p "$TEST_INSTALL_DESTDIR" LDFLAGS="-s" "$GITREPO_ROOT/scripts/test.sh" git -C "$GITREPO_ROOT" fetch --tags --unshallow || true -git -C "$GITREPO_ROOT" describe > "$TEST_INSTALL_DESTDIR/aktualizr-version" +"$GITREPO_ROOT/scripts/get_version.sh" git "$GITREPO_ROOT" > "$TEST_INSTALL_DESTDIR/aktualizr-version" diff --git a/scripts/clang-tidy-wrapper.sh b/scripts/clang-tidy-wrapper.sh new file mode 100755 index 0000000000..b760553ef4 --- /dev/null +++ b/scripts/clang-tidy-wrapper.sh @@ -0,0 +1,17 @@ +#! /bin/bash + +set -euo pipefail + +CLANG_TIDY="${1}" +CMAKE_BINARY_DIR="${2}" +CMAKE_SOURCE_DIR="${3}" +FILE="${4}" + +if [[ ! -e "${CMAKE_BINARY_DIR}/compile_commands.json" ]]; then + echo "compile_commands.json not found!" + exit 1 +fi + +${CLANG_TIDY} -quiet -header-filter="\(\(${CMAKE_SOURCE_DIR}|\\.\\.\)/src/|include/libaktualizr/\).*" \ + --checks=-clang-analyzer-cplusplus.NewDeleteLeaks,-clang-analyzer-core.NonNullParamChecker \ + --extra-arg-before=-Wno-unknown-warning-option -format-style=file -p "${CMAKE_BINARY_DIR}" "${FILE}" diff --git a/scripts/export_to_hsm.sh b/scripts/export_to_hsm.sh index 71e08fb5ca..bf5187407e 100755 --- a/scripts/export_to_hsm.sh +++ b/scripts/export_to_hsm.sh @@ -6,7 +6,7 @@ TOKEN_DIR=${TOKEN_DIR:-/var/lib/softhsm/tokens} SOFTHSM2_CONF=${SOFTHSM2_CONF:-/etc/softhsm/softhsm2.conf} sed -i "s:^directories\\.tokendir = .*$:directories.tokendir = ${TOKEN_DIR}:" "${SOFTHSM2_CONF}" -mkdir -p ${TOKEN_DIR} +mkdir -p "${TOKEN_DIR}" softhsm2-util --init-token --slot 0 --label "Virtual token" --pin 1234 --so-pin 1234 SLOT=$(softhsm2-util --show-slots | grep -m 1 -oP 'Slot \K[0-9]+') diff --git a/scripts/garage-fsck.sh b/scripts/garage-fsck.sh new file mode 100755 index 0000000000..1847736a11 --- /dev/null +++ 
b/scripts/garage-fsck.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +CREDENTIALS="${1}" +OSTREE_REF="${2}" +GITREPO_ROOT="$(readlink -f "$(dirname "$0")/..")" +GARAGE_CHECK="${3:-${GITREPO_ROOT}/build/src/sota_tools/garage-check}" +LOCAL_REPO="${4:-temp-repo}" + +"${GARAGE_CHECK}" -j "${CREDENTIALS}" -w --ref "${OSTREE_REF}" -t "${LOCAL_REPO}" + +cat << EOF >> "${LOCAL_REPO}/config" +[core] +repo_version=1 +mode=archive-z2 +EOF + +# This directory is required despite that it appears to be unused. +mkdir -p "${LOCAL_REPO}/refs/remotes" +mkdir -p "${LOCAL_REPO}/refs/heads" +echo -n "${OSTREE_REF}" > "${LOCAL_REPO}/refs/heads/master" + +ostree fsck --repo "${LOCAL_REPO}" diff --git a/scripts/get_garage_sign.py b/scripts/get-garage-sign.py similarity index 64% rename from scripts/get_garage_sign.py rename to scripts/get-garage-sign.py index b2ceb0961f..3cb9f772e5 100755 --- a/scripts/get_garage_sign.py +++ b/scripts/get-garage-sign.py @@ -12,11 +12,18 @@ from pathlib import Path +#aws_bucket_url = 'https://tuf-cli-releases.ota.here.com/' +aws_bucket_url = 'https://storage.googleapis.com/public-shared-artifacts-fio/mirrors/ota-tuf-cli-releases/' +default_version_name = 'cli-0.7.2-48-gf606131.tgz' +default_version_sha256 = 'f20c9f3e08fff277a78786025105298322c54874ca66a753e7ad0b2ffb239502' +default_version_size = '59517659' + + def main(): parser = argparse.ArgumentParser(description='Download a specific or the latest version of garage-sign') parser.add_argument('-a', '--archive', help='static local archive') - parser.add_argument('-n', '--name', help='specific version to download') - parser.add_argument('-s', '--sha256', help='expected hash of requested version') + parser.add_argument('-n', '--name', help='specific version to download', default=default_version_name) + parser.add_argument('-s', '--sha256', help='expected hash of requested version', default=default_version_sha256) parser.add_argument('-o', '--output', type=Path, default=Path('.'), help='download directory') args = 
parser.parse_args() @@ -27,7 +34,7 @@ def main(): if args.archive: path = args.archive else: - path = find_version(args.name, args.sha256, args.output) + path = download_garage_cli_tools(args.name, args.sha256, args.output) if path is None: return 1 @@ -48,7 +55,7 @@ def find_version(version_name, sha256_hash, output): if version_name and not sha256_hash: print('Warning: specific version requested without specifying the sha256 hash.') - r = urllib.request.urlopen('https://ats-tuf-cli-releases.s3-eu-central-1.amazonaws.com') + r = urllib.request.urlopen(aws_bucket_url) if r.status != 200: print('Error: unable to request index!') return None @@ -59,9 +66,8 @@ def find_version(version_name, sha256_hash, output): versions = dict() cli_items = [i for i in items if i.find(ns + 'Key').text.startswith('cli-')] for i in cli_items: - # ETag is md5sum. versions[i.find(ns + 'Key').text] = (i.find(ns + 'LastModified').text, - i.find(ns + 'ETag').text[1:-1]) + i.find(ns + 'Size').text) if version_name: name = version_name if name not in versions: @@ -76,10 +82,10 @@ def find_version(version_name, sha256_hash, output): name = max(versions, key=(lambda name: (versions[name][0]))) path = output.joinpath(name) - md5_hash = versions[name][1] - if not path.is_file() or not check_hashes(name, path, md5_hash, sha256_hash): + size = versions[name][1] + if not path.is_file() or not verify(name, path, size, sha256_hash): print('Downloading ' + name + ' from server...') - if download(name, path, md5_hash, sha256_hash): + if download(name, path, size, sha256_hash): print(name + ' successfully downloaded and validated.') return path else: @@ -88,36 +94,43 @@ def find_version(version_name, sha256_hash, output): return path -def download(name, path, md5_hash, sha256_hash): - r = urllib.request.urlopen('https://ats-tuf-cli-releases.s3-eu-central-1.amazonaws.com/' + name) +def download(name, path, size, sha256_hash): + r = urllib.request.urlopen(aws_bucket_url + name) if r.status != 200: 
print('Error: unable to request file!') return False with path.open(mode='wb') as f: shutil.copyfileobj(r, f) - return check_hashes(name, path, md5_hash, sha256_hash) + return verify(name, path, size, sha256_hash) + + +def download_garage_cli_tools(name, sha256, output): + path = output.joinpath(name) + download_url = os.path.join(aws_bucket_url, name) + print('Downloading ' + name + ' from server, url: {}...'.format(download_url)) + if download(name, path, default_version_size, sha256): + print(name + ' successfully downloaded and validated.') + return path + else: + return None -def check_hashes(name, path, md5_hash, sha256_hash): +def verify(name, path, size, sha256_hash): if not tarfile.is_tarfile(str(path)): print('Error: ' + name + ' is not a valid tar archive!') return False - m = hashlib.md5() + actual_size = os.path.getsize(str(path)) + if actual_size != int(size): + print('Error: size of ' + name + ' (' + str(actual_size) + ') does not match expected value (' + size + ')!') + return False if sha256_hash: s = hashlib.sha256() - with path.open(mode='rb') as f: - data = f.read() - m.update(data) - if sha256_hash: + with path.open(mode='rb') as f: + data = f.read() s.update(data) - if m.hexdigest() != md5_hash: - print('Error: md5 hash of ' + name + ' does not match expected value!') - print(m.hexdigest()) - print(md5_hash) - return False - if sha256_hash and s.hexdigest() != sha256_hash: - print('Error: sha256 hash of ' + name + ' does not match provided value!') - return False + if s.hexdigest() != sha256_hash: + print('Error: sha256 hash of ' + name + ' does not match provided value!') + return False return True diff --git a/scripts/get_version.sh b/scripts/get_version.sh new file mode 100755 index 0000000000..713b4ef556 --- /dev/null +++ b/scripts/get_version.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -euo pipefail + +GIT=${1:-git} +REPO=${2:-.} + +"$GIT" -C "$REPO" describe --long | tr -d '\n' diff --git a/scripts/make_src_archive.sh 
b/scripts/make_src_archive.sh index b31286e79c..86d3e4329f 100755 --- a/scripts/make_src_archive.sh +++ b/scripts/make_src_archive.sh @@ -1,15 +1,27 @@ #!/usr/bin/env bash -set -eu +set -eux -OUTPUT=$1 -REPO=${2:-.} +SCRIPTS_DIR=$(readlink -f "$(dirname "$0")") + +OUTPUT=$(realpath "$1") +REPO=$(realpath "${2:-.}") + +# Just in case this wasn't done before. +git -C "$REPO" submodule update --init --recursive python3 -m venv venv +# shellcheck disable=SC1091 . venv/bin/activate -pip install 'git_archive_all==1.19.4' +pip install 'git_archive_all==1.21.0' + +TMPDIR=$(mktemp -d) +trap 'rm -rf $TMPDIR' EXIT +cd "$TMPDIR" + +# store version in archive +"$SCRIPTS_DIR/get_version.sh" git "$REPO" > VERSION -cd "$REPO" -git-archive-all "$OUTPUT" +git-archive-all -C "$REPO" --extra VERSION "$OUTPUT" diff --git a/scripts/publish_github_docs.sh b/scripts/publish_github_docs.sh new file mode 100755 index 0000000000..9ad351c98c --- /dev/null +++ b/scripts/publish_github_docs.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +set -euo pipefail + +GIT_REMOTE=https://github.com/advancedtelematic/aktualizr +DOX_DOCS=${DOX_DOCS:-$TEST_BUILD_DIR/docs/doxygen/html} +WORKTREE=${WORKTREE:-$TEST_BUILD_DIR/pages} +DRY_RUN=${DRY_RUN:-0} +DESCRIBE=$(git describe) +LAST_TAG=$(git describe --abbrev=0) + +set -x + +git remote add github_rls "$GIT_REMOTE" || true +git fetch github_rls +if ! [ -d "$WORKTREE" ]; then + mkdir -p "$WORKTREE" + git worktree add "$WORKTREE" github_rls/gh-pages +fi + +gitcommit() ( + export GIT_AUTHOR_NAME="HERE OTA Gitlab CI" + export GIT_AUTHOR_EMAIL="gitlab@example.org" + export GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME + export GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL + git commit "$@" +) + +DOX_DOCS=$(realpath "$DOX_DOCS") +WORKTREE=$(realpath "$WORKTREE") + +( +cd "$WORKTREE" + +git reset --hard github_rls/gh-pages + +# create release directory +if [ -d "$WORKTREE/$LAST_TAG" ]; then + echo "Docs for $LAST_TAG already published, skipping..." 
+else + cp -r "$DOX_DOCS" "$WORKTREE/$LAST_TAG" + git add "$WORKTREE/$LAST_TAG" + gitcommit -m "$LAST_TAG release" +fi + +# create last snapshot + +# cleanup old snapshot +find . \( -regex './[^/]*' -and -type f -and -not -path ./.git \) -or \( -path './search/*' \) -exec git rm -r {} + +cp -ar "$DOX_DOCS/." . +git add . +if git diff --cached --quiet; then + echo "Docs already updated to the latest version, skipping..." +else + gitcommit -m "Update docs to latest ($DESCRIBE)" +fi + +if [ "$DRY_RUN" != 1 ]; then + git config credential.${GIT_REMOTE}.username "$GITHUB_API_USER" + # shellcheck disable=SC2016 + git config credential.${GIT_REMOTE}.helper '!f() { echo "password=$(echo $GITHUB_API_TOKEN)"; }; f' + git push github_rls HEAD:gh-pages +fi +) diff --git a/scripts/publish_github_rls.py b/scripts/publish_github_rls.py index 67d437ed90..716c27f22c 100755 --- a/scripts/publish_github_rls.py +++ b/scripts/publish_github_rls.py @@ -6,9 +6,25 @@ import os.path import re import sys +import time import urllib.request +def urlopen_retry(req): + delay = 1 + last_exc = Exception() + for k in range(5): + try: + return urllib.request.urlopen(req) + except urllib.error.HTTPError as e: + if e.code < 500: + raise + last_exc = e + time.sleep(delay) + delay *= 2 + raise last_exc + + def main(): if len(sys.argv) < 2: print("usage: {} RLS_TAG [assets]".format(sys.argv[0])) @@ -19,15 +35,19 @@ def main(): api_token = os.environ["GITHUB_API_TOKEN"] files = sys.argv[2:] - req = urllib.request.Request("https://api.github.com/repos/advancedtelematic/aktualizr/releases/tags/{}".format(rls_tag), \ - headers={ - "Accept": "application/vnd.github.v3+json", - "Authorization": "token {}".format(api_token), - "Content-Type": "application/json" - }, method="GET" - ) + req = urllib.request.Request( + "https://api.github.com/repos/uptane/aktualizr/releases/tags/{}".format( + rls_tag + ), + headers={ + "Accept": "application/vnd.github.v3+json", + "Authorization": "token {}".format(api_token), + 
"Content-Type": "application/json", + }, + method="GET", + ) try: - with urllib.request.urlopen(req) as f: + with urlopen_retry(req) as f: json.loads(f.read()) except urllib.error.HTTPError as e: if e.code != 404: @@ -37,21 +57,18 @@ def main(): return 0 # create release - c = { - "tag_name": rls_tag, - "name": rls_tag, - "body": "", - "draft": False - } - req = urllib.request.Request("https://api.github.com/repos/advancedtelematic/aktualizr/releases", - data=json.dumps(c).encode(), - headers={ - "Accept": "application/vnd.github.v3+json", - "Authorization": "token {}".format(api_token), - "Content-Type": "application/json" - }, method="POST" - ) - with urllib.request.urlopen(req) as f: + c = {"tag_name": rls_tag, "name": rls_tag, "body": "", "draft": False} + req = urllib.request.Request( + "https://api.github.com/repos/uptane/aktualizr/releases", + data=json.dumps(c).encode(), + headers={ + "Accept": "application/vnd.github.v3+json", + "Authorization": "token {}".format(api_token), + "Content-Type": "application/json", + }, + method="POST", + ) + with urlopen_retry(req) as f: resp = json.loads(f.read()) upload_url = re.sub("{.*}", "", resp["upload_url"]) @@ -60,16 +77,18 @@ def main(): bn = os.path.basename(fn) url = upload_url + "?name={}".format(bn) with open(fn, "rb") as f: - req = urllib.request.Request(url, - data=f, - headers={ - "Accept": "application/vnd.github.v3+json", - "Authorization": "token {}".format(api_token), - "Content-Length": str(os.path.getsize(fn)), - "Content-Type": mimetypes.guess_type(bn)[0] - }, method="POST" - ) - urllib.request.urlopen(req) + req = urllib.request.Request( + url, + data=f, + headers={ + "Accept": "application/vnd.github.v3+json", + "Authorization": "token {}".format(api_token), + "Content-Length": str(os.path.getsize(fn)), + "Content-Type": mimetypes.guess_type(bn)[0], + }, + method="POST", + ) + urlopen_retry(req) return 0 diff --git a/scripts/push-sota-tools-image.sh b/scripts/push-sota-tools-image.sh deleted file 
mode 100755 index 99c2ad18f0..0000000000 --- a/scripts/push-sota-tools-image.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash -set -euo pipefail - -DOCKERFILE=src/sota_tools/Dockerfile -docker build -t advancedtelematic/sota-tools -f ${DOCKERFILE} . -docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" -docker push advancedtelematic/sota-tools diff --git a/scripts/run_docker_test.sh b/scripts/run_docker_test.sh index 933529b18a..1c78935f02 100755 --- a/scripts/run_docker_test.sh +++ b/scripts/run_docker_test.sh @@ -5,14 +5,14 @@ set -euo pipefail # Utility to help running in-docker tests in the same conditions as CI (Gitlab) # # example: -# ./scripts/run_docker_test.sh docker/Dockerfile.debian.testing \ +# ./scripts/run_docker_test.sh docker/Dockerfile.ubuntu.focal \ # -eTEST_BUILD_DIR=build-openssl11 \ # -eTEST_CMAKE_BUILD_TYPE=Valgrind \ # -eTEST_TESTSUITE_ONLY=crypto \ # -eTEST_WITH_STATICTESTS=1 \ # -- ./scripts/test.sh # alternatively: -# ./scripts/run_docker_test.sh docker/Dockerfile.debian.testing +# ./scripts/run_docker_test.sh docker/Dockerfile.ubuntu.focal # # (shell starting...) # testuser@xxx:yyy$ TEST_BUILD_DIR=build-openssl11 \ # TEST_CMAKE_BUILD_TYPE=Valgrind \ @@ -48,10 +48,15 @@ docker build -t "${IMG_TAG}" -f "$DOCKERFILE" . 
# Prevent DOCKER_OPTS[@]: unbound variable # From SO: https://stackoverflow.com/a/34361807/6096518 -OPTS_STR=${DOCKER_OPTS[@]+"${DOCKER_OPTS[@]}"} +OPTS_STR="${DOCKER_OPTS[*]+"${DOCKER_OPTS[*]}"}" # run under current user, mounting current directory at the same location in the container # # note: we've switched back to running the tests as root on CI when we switched from Jenkins to Gitlab # it would be great to revert to the old way at some point -docker run -u "$(id -u):$(id -g)" -v "$PWD:$PWD" -w "$PWD" --rm $OPTS_STR -it "${IMG_TAG}" "$@" + +if [[ -z "${OPTS_STR}" ]]; then + docker run -u "$(id -u):$(id -g)" -v "$PWD:$PWD" -w "$PWD" --rm -it "${IMG_TAG}" "$@" +else + docker run -u "$(id -u):$(id -g)" -v "$PWD:$PWD" -w "$PWD" --rm "${OPTS_STR}" -it "${IMG_TAG}" "$@" +fi diff --git a/scripts/run_travis.py b/scripts/run_travis.py deleted file mode 100755 index 24acd522d8..0000000000 --- a/scripts/run_travis.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import sys -import pprint -import shlex -import yaml - -from pathlib import Path - - -def gen_test_script(ty, job, output): - output.write('#!/usr/bin/env bash\n\n') - output.write('set -ex\n') - - # extract environment variables - e_str = ty['env'][job] - for v_assign in shlex.split(e_str): - output.write(v_assign + '\n') - output.write('\n') - - # extract script lines - for l in ty['script']: - output.write(l + '\n') - - -def main(): - parser = argparse.ArgumentParser(description='Run travis jobs locally') - parser.add_argument('--yml', '-y', metavar='travis.yml', type=Path, - default=Path('.travis.yml'), help='.travis.yml file') - parser.add_argument('--job', '-j', metavar='JOB', type=int, default=0) - parser.add_argument('--output', '-o', metavar='OUTPUT.sh', type=argparse.FileType('w'), - default=sys.stdout) - parser.add_argument('--verbose', '-v', action='store_true') - args = parser.parse_args() - - ymlf = args.yml - with open(ymlf, 'r') as f: - yml = f.read() - - ty = 
yaml.load(yml) - - if args.verbose: - pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr) - pp.pprint(ty) - - gen_test_script(ty, args.job, args.output) - - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/scripts/test.sh b/scripts/test.sh index 2a8ca7f7e1..afc9adb8ab 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -3,7 +3,6 @@ set -euo pipefail GITREPO_ROOT="${1:-$(readlink -f "$(dirname "$0")/..")}" -TRAVIS_COMMIT=${TRAVIS_COMMIT:-} # Test options: test stages, additional checkers, compile options TEST_BUILD_DIR=${TEST_BUILD_DIR:-build-test} @@ -18,11 +17,7 @@ TEST_WITH_SOTA_TOOLS=${TEST_WITH_SOTA_TOOLS:-1} TEST_WITH_P11=${TEST_WITH_P11:-0} TEST_WITH_OSTREE=${TEST_WITH_OSTREE:-1} TEST_WITH_DEB=${TEST_WITH_DEB:-1} -TEST_WITH_DOCKERAPP=${TEST_WITH_DOCKERAPP:-0} -TEST_WITH_ISOTP=${TEST_WITH_ISOTP:-1} -TEST_WITH_PARTIAL=${TEST_WITH_PARTIAL:-1} TEST_WITH_FAULT_INJECTION=${TEST_WITH_FAULT_INJECTION:-0} -TEST_WITH_LOAD_TESTS=${TEST_WITH_LOAD_TESTS:-0} TEST_CC=${TEST_CC:-gcc} TEST_CMAKE_GENERATOR=${TEST_CMAKE_GENERATOR:-Ninja} @@ -46,16 +41,12 @@ if [[ $TEST_WITH_COVERAGE = 1 ]]; then CMAKE_ARGS+=("-DBUILD_WITH_CODE_COVERAGE= if [[ $TEST_WITH_SOTA_TOOLS = 1 ]]; then CMAKE_ARGS+=("-DBUILD_SOTA_TOOLS=ON"); fi if [[ $TEST_WITH_P11 = 1 ]]; then CMAKE_ARGS+=("-DBUILD_P11=ON") - CMAKE_ARGS+=("-DTEST_PKCS11_ENGINE_PATH=${TEST_PKCS11_ENGINE_PATH}") + CMAKE_ARGS+=("-DPKCS11_ENGINE_PATH=${TEST_PKCS11_ENGINE_PATH}") CMAKE_ARGS+=("-DTEST_PKCS11_MODULE_PATH=${TEST_PKCS11_MODULE_PATH}") fi if [[ $TEST_WITH_OSTREE = 1 ]]; then CMAKE_ARGS+=("-DBUILD_OSTREE=ON"); fi if [[ $TEST_WITH_DEB = 1 ]]; then CMAKE_ARGS+=("-DBUILD_DEB=ON"); fi -if [[ $TEST_WITH_DOCKERAPP = 1 ]]; then CMAKE_ARGS+=("-DBUILD_DOCKERAPP=ON"); fi -if [[ $TEST_WITH_ISOTP = 1 ]]; then CMAKE_ARGS+=("-DBUILD_ISOTP=ON"); fi -if [[ $TEST_WITH_PARTIAL = 1 ]]; then CMAKE_ARGS+=("-DBUILD_PARTIAL=ON"); fi if [[ $TEST_WITH_FAULT_INJECTION = 1 ]]; then CMAKE_ARGS+=("-DFAULT_INJECTION=ON"); fi -if [[ 
$TEST_WITH_LOAD_TESTS = 1 ]]; then CMAKE_ARGS+=("-DBUILD_LOAD_TESTS=ON"); fi if [[ -n $TEST_SOTA_PACKED_CREDENTIALS ]]; then CMAKE_ARGS+=("-DSOTA_PACKED_CREDENTIALS=$TEST_SOTA_PACKED_CREDENTIALS"); fi @@ -133,6 +124,7 @@ if [[ $TEST_DRYRUN != 1 ]]; then fi set -x + git config --global --add safe.directory "${GITREPO_ROOT}" cmake "${CMAKE_ARGS[@]}" "${GITREPO_ROOT}" || add_fatal_failure "cmake configure" ) fi @@ -181,10 +173,10 @@ if [[ $TEST_WITH_TESTSUITE = 1 ]]; then set -x run_make coverage || add_failure "testsuite with coverage" - if [[ -n $TRAVIS_COMMIT ]]; then + if [[ -n ${CODECOV_TOKEN:-} ]]; then bash <(curl -s https://codecov.io/bash) -f '!*/#usr*' -f '!*/^#third_party*' -R "${GITREPO_ROOT}" -s . > /dev/null else - echo "Not inside Travis, skipping codecov.io upload" + echo "Skipping codecov.io upload" fi set +x fi diff --git a/scripts/test_garage_deploy_deb.sh b/scripts/test_garage_deploy_deb.sh deleted file mode 100755 index c0af9381e7..0000000000 --- a/scripts/test_garage_deploy_deb.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -set -exu - -PKG_SRCDIR="${1:-/persistent}" -INSTALL_DOCKERFILE="${2:-Dockerfile}" -IMG_TAG=garage-deploy-$(cat /proc/sys/kernel/random/uuid) - -echo "Building docker for testing garage-deploy deb package inside it." -docker build -t "${IMG_TAG}" -f "${INSTALL_DOCKERFILE}" . -echo "Running docker container with garage-deploy debian package inside." 
-docker run --rm -v "${PKG_SRCDIR}":/persistent -t "${IMG_TAG}" /scripts/test_install_garage_deploy.sh diff --git a/scripts/testupdate_server.py b/scripts/testupdate_server.py index 589b680609..e8f8221c04 100755 --- a/scripts/testupdate_server.py +++ b/scripts/testupdate_server.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 +import os.path import sys import socket from http.server import BaseHTTPRequestHandler, HTTPServer @@ -9,9 +10,14 @@ class Handler(BaseHTTPRequestHandler): def do_GET(self): local_path = self.path print("GET: " + local_path) + full_path = self.server.base_dir + "/fake_root/repo/" + local_path + if not os.path.exists(full_path): + self.send_response(404) + self.end_headers() + return self.send_response(200) self.end_headers() - with open(self.server.base_dir + "/fake_root/repo/" + local_path, "rb") as fl: + with open(full_path, "rb") as fl: self.wfile.write(bytearray(fl.read())) def do_POST(self): diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b58a14d63f..69cd31aa61 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -13,7 +13,4 @@ add_subdirectory("aktualizr_info") add_subdirectory("uptane_generator") add_subdirectory("cert_provider") -add_subdirectory("aktualizr_lite") add_subdirectory("aktualizr_get") - -add_subdirectory("load_tests") diff --git a/src/aktualizr_get/CMakeLists.txt b/src/aktualizr_get/CMakeLists.txt index d815d6cc8b..822851dc00 100644 --- a/src/aktualizr_get/CMakeLists.txt +++ b/src/aktualizr_get/CMakeLists.txt @@ -1,13 +1,22 @@ add_executable(aktualizr-get main.cc get.cc) -target_link_libraries(aktualizr-get aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) +target_link_libraries(aktualizr-get aktualizr_lib) -install(TARGETS aktualizr-get RUNTIME DESTINATION bin COMPONENT aktualizr-get) +install(TARGETS aktualizr-get RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT aktualizr-get) add_aktualizr_test(NAME aktualizr_get SOURCES get.cc get_test.cc PROJECT_WORKING_DIRECTORY) +# Check the --help option works. 
+add_test(NAME aktualizr-get-option-help + COMMAND aktualizr-get --help) + +# Report version. +add_test(NAME aktualizr-get-option-version + COMMAND aktualizr-get --version) +set_tests_properties(aktualizr-get-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current aktualizr-get version is: ${AKTUALIZR_VERSION}") + aktualizr_source_file_checks(main.cc get.cc get.h get_test.cc) # vim: set tabstop=4 shiftwidth=4 expandtab: diff --git a/src/aktualizr_get/get.cc b/src/aktualizr_get/get.cc index 2a682383ba..9da001225d 100644 --- a/src/aktualizr_get/get.cc +++ b/src/aktualizr_get/get.cc @@ -1,12 +1,14 @@ #include "get.h" #include "crypto/keymanager.h" #include "http/httpclient.h" +#include "storage/invstorage.h" -std::string aktualizrGet(Config &config, const std::string &url) { - auto storage = INvStorage::newStorage(config.storage); +std::string aktualizrGet(Config &config, const std::string &url, const std::vector &headers, + StorageClient storage_client) { + auto storage = INvStorage::newStorage(config.storage, false, storage_client); storage->importData(config.import); - auto client = std_::make_unique(); + auto client = std_::make_unique(&headers); KeyManager keys(storage, config.keymanagerConfig()); keys.copyCertsToCurl(*client); auto resp = client->get(url, HttpInterface::kNoLimit); diff --git a/src/aktualizr_get/get.h b/src/aktualizr_get/get.h index 8e78eb2522..c1ad795671 100644 --- a/src/aktualizr_get/get.h +++ b/src/aktualizr_get/get.h @@ -1,8 +1,10 @@ #ifndef AKTUALIZR_GET_HELPERS #define AKTUALIZR_GET_HELPERS -#include "config/config.h" +#include "libaktualizr/config.h" +#include "storage/invstorage.h" -std::string aktualizrGet(Config &config, const std::string &url); +std::string aktualizrGet(Config &config, const std::string &url, const std::vector &headers, + StorageClient storage_client = StorageClient::kUptane); #endif // AKTUALIZR_GET_HELPERS diff --git a/src/aktualizr_get/get_test.cc b/src/aktualizr_get/get_test.cc index 8025729739..7ac14591c7 
100644 --- a/src/aktualizr_get/get_test.cc +++ b/src/aktualizr_get/get_test.cc @@ -5,14 +5,15 @@ #include "get.h" #include "test_utils.h" -static std::string server = "http://127.0.0.1:"; +static std::string server = "http://localhost:"; TEST(aktualizr_get, good) { Config config; TemporaryDirectory dir; config.storage.path = dir.Path(); - std::string body = aktualizrGet(config, server + "/path/1/2/3"); + std::vector headers; + std::string body = aktualizrGet(config, server + "/path/1/2/3", headers); EXPECT_EQ("{\"path\": \"/path/1/2/3\"}", body); } diff --git a/src/aktualizr_get/main.cc b/src/aktualizr_get/main.cc index a766e21dd2..cf24eba583 100644 --- a/src/aktualizr_get/main.cc +++ b/src/aktualizr_get/main.cc @@ -3,15 +3,27 @@ #include #include +#include "storage/invstorage.h" -#include "config/config.h" #include "get.h" - +#include "libaktualizr/config.h" +#include "logging/logging.h" #include "utilities/aktualizr_version.h" namespace bpo = boost::program_options; -bpo::variables_map parse_options(int argc, char *argv[]) { +void check_info_options(const bpo::options_description &description, const bpo::variables_map &vm) { + if (vm.count("help") != 0 || (vm.count("url") == 0 && vm.count("version") == 0)) { + std::cout << description << '\n'; + exit(EXIT_SUCCESS); + } + if (vm.count("version") != 0) { + std::cout << "Current aktualizr-get version is: " << aktualizr_version() << "\n"; + exit(EXIT_SUCCESS); + } +} + +bpo::variables_map parse_options(int argc, char **argv) { bpo::options_description description( "A tool similar to wget that will do an HTTP get on the given URL using the device's configured credentials."); // clang-format off @@ -19,10 +31,13 @@ bpo::variables_map parse_options(int argc, char *argv[]) { // The first three are commandline only. 
description.add_options() ("help,h", "print usage") - ("version,v", "Current aktualizr version") - ("config,c", bpo::value >()->composing(), "configuration file or directory") + ("version,v", "Current aktualizr-get version") + ("config,c", bpo::value >()->composing(), "configuration file or directory, by default /var/sota") + ("header,H", bpo::value >()->composing(), "Additional headers to pass") + ("storage,s", bpo::value(), "options: TUF(default), Uptane") ("loglevel", bpo::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") - ("url,u", bpo::value(), "url to get"); + ("url,u", bpo::value(), "url to get, mandatory"); + // clang-format on bpo::variables_map vm; @@ -30,12 +45,8 @@ bpo::variables_map parse_options(int argc, char *argv[]) { try { bpo::basic_parsed_options parsed_options = bpo::command_line_parser(argc, argv).options(description).run(); bpo::store(parsed_options, vm); + check_info_options(description, vm); bpo::notify(vm); - if (vm.count("help") != 0) { - std::cout << description << '\n'; - exit(EXIT_SUCCESS); - } - } catch (const bpo::required_option &ex) { // print the error and append the default commandline option description std::cout << ex.what() << std::endl << description; @@ -58,7 +69,17 @@ int main(int argc, char *argv[]) { int r = EXIT_FAILURE; try { Config config(commandline_map); - std::string body = aktualizrGet(config, commandline_map["url"].as()); + std::vector headers; + if (commandline_map.count("header") == 1) { + headers = commandline_map["header"].as>(); + } + StorageClient client = StorageClient::kTUF; + if (commandline_map.count("storage") == 1) { + if (commandline_map["storage"].as() == "Uptane") { + client = StorageClient::kUptane; + } + } + std::string body = aktualizrGet(config, commandline_map["url"].as(), headers, client); std::cout << body; r = EXIT_SUCCESS; diff --git a/src/aktualizr_info/CMakeLists.txt b/src/aktualizr_info/CMakeLists.txt index 0e65d8a8de..2e9e0807d0 100644 --- 
a/src/aktualizr_info/CMakeLists.txt +++ b/src/aktualizr_info/CMakeLists.txt @@ -3,18 +3,26 @@ set(AKTUALIZR_INFO_SRC main.cc aktualizr_info_config.cc) set(AKTUALIZR_INFO_HEADERS aktualizr_info_config.h) add_executable(aktualizr-info ${AKTUALIZR_INFO_SRC}) -target_link_libraries(aktualizr-info aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) - +target_link_libraries(aktualizr-info aktualizr_lib) install(TARGETS aktualizr-info COMPONENT aktualizr - RUNTIME DESTINATION bin) + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) add_aktualizr_test(NAME aktualizr_info_config SOURCES aktualizr_info_config.cc aktualizr_info_config_test.cc PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME aktualizr_info SOURCES aktualizr_info_test.cc) +# Check the --help option works. +add_test(NAME aktualizr-info-option-help + COMMAND aktualizr-info --help) + +# Report version. +add_test(NAME aktualizr-info-option-version + COMMAND aktualizr-info --version) +set_tests_properties(aktualizr-info-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current aktualizr-info version is: ${AKTUALIZR_VERSION}") + aktualizr_source_file_checks(${AKTUALIZR_INFO_SRC} ${AKTUALIZR_INFO_HEADERS} ${TEST_SOURCES}) diff --git a/src/aktualizr_info/aktualizr_info_config.cc b/src/aktualizr_info/aktualizr_info_config.cc index db2fcb3516..0d02579fbf 100644 --- a/src/aktualizr_info/aktualizr_info_config.cc +++ b/src/aktualizr_info/aktualizr_info_config.cc @@ -1,5 +1,6 @@ #include "aktualizr_info_config.h" +#include "logging/logging.h" #include "utilities/config_utils.h" AktualizrInfoConfig::AktualizrInfoConfig(const boost::program_options::variables_map& cmd) { @@ -51,16 +52,20 @@ void AktualizrInfoConfig::updateFromPropertyTree(const boost::property_tree::ptr } // from aktualizr config + CopySubtreeFromConfig(pacman, "bootloader", pt); CopySubtreeFromConfig(pacman, "pacman", pt); CopySubtreeFromConfig(storage, "storage", pt); + CopySubtreeFromConfig(storage, "uptane", pt); } void 
AktualizrInfoConfig::writeToStream(std::ostream& sink) const { // Keep this order the same as in aktualizr_info_config.h and // AktualizrInfoConfig::updateFromPropertyTree(). WriteSectionToStream(logger, "logger", sink); + WriteSectionToStream(logger, "bootloader", sink); WriteSectionToStream(pacman, "pacman", sink); WriteSectionToStream(storage, "storage", sink); + WriteSectionToStream(storage, "uptane", sink); } std::ostream& operator<<(std::ostream& os, const AktualizrInfoConfig& cfg) { diff --git a/src/aktualizr_info/aktualizr_info_config.h b/src/aktualizr_info/aktualizr_info_config.h index 5408a9358f..c7e3477013 100644 --- a/src/aktualizr_info/aktualizr_info_config.h +++ b/src/aktualizr_info/aktualizr_info_config.h @@ -1,13 +1,12 @@ #ifndef AKTUALIZR_INFO_CONFIG_H_ #define AKTUALIZR_INFO_CONFIG_H_ -#include -#include -#include +#include +#include +#include +#include -#include "logging/logging_config.h" -#include "package_manager/packagemanagerconfig.h" -#include "storage/storage_config.h" +#include "libaktualizr/config.h" #include "utilities/config_utils.h" // Try to keep the order of config options the same as in @@ -17,16 +16,18 @@ class AktualizrInfoConfig : public BaseConfig { public: AktualizrInfoConfig() = default; - AktualizrInfoConfig(const boost::program_options::variables_map& cmd); + explicit AktualizrInfoConfig(const boost::program_options::variables_map& cmd); explicit AktualizrInfoConfig(const boost::filesystem::path& filename); void postUpdateValues(); void writeToStream(std::ostream& sink) const; - // from primary config + // from Primary config LoggerConfig logger; + BootloaderConfig bootloader; PackageConfig pacman; StorageConfig storage; + UptaneConfig uptane; private: void updateFromCommandLine(const boost::program_options::variables_map& cmd); diff --git a/src/aktualizr_info/aktualizr_info_test.cc b/src/aktualizr_info/aktualizr_info_test.cc index 7a75a46292..9ca04409ca 100644 --- a/src/aktualizr_info/aktualizr_info_test.cc +++ 
b/src/aktualizr_info/aktualizr_info_test.cc @@ -1,9 +1,8 @@ #include -#include -#include +#include -#include "config/config.h" +#include "libaktualizr/config.h" #include "storage/sqlstorage.h" #include "test_utils.h" #include "utilities/utils.h" @@ -13,7 +12,7 @@ constexpr char warning_no_meta_data[] = "Metadata is not available\n"; class AktualizrInfoTest : public ::testing::Test { protected: AktualizrInfoTest() : test_conf_file_{test_dir_ / "conf.toml"}, test_db_file_{test_dir_ / "sql.db"} { - config_.pacman.type = PackageManager::kNone; + config_.pacman.type = PACKAGE_MANAGER_NONE; config_.storage.path = test_dir_.PathString(); config_.storage.sqldb_path = test_db_file_; // set it into 'trace' to see the aktualizr-info output @@ -82,16 +81,16 @@ class AktualizrInfoTest : public ::testing::Test { /** * Verifies an output of the aktualizr-info in a positive case when - * there are both primary and secondary present and a device is provisioned + * there are both Primary and Secondary present and a device is provisioned * and metadata are fetched from a server * * Checks actions: * * - [x] Print device ID - * - [x] Print primary ECU serial - * - [x] Print primary ECU hardware ID - * - [x] Print secondary ECU serials - * - [x] Print secondary ECU hardware IDs + * - [x] Print Primary ECU serial + * - [x] Print Primary ECU hardware ID + * - [x] Print Secondary ECU serials + * - [x] Print Secondary ECU hardware IDs * - [x] Print provisioning status, if provisioned * - [x] Print whether metadata has been fetched from the server, if they were fetched */ @@ -144,12 +143,12 @@ TEST_F(AktualizrInfoTest, PrintProvisioningAndMetadataNegative) { } /** - * Verifies an output of miscofigured secondary ECUs + * Verifies an output of miscofigured Secondary ECUs * * Checks actions: * - * - [x] Print secondary ECUs no longer accessible (miscofigured: old) - * - [x] Print secondary ECUs registered after provisioning (not registered) + * - [x] Print Secondary ECUs no longer accessible 
(miscofigured: old) + * - [x] Print Secondary ECUs registered after provisioning (not registered) */ TEST_F(AktualizrInfoTest, PrintSecondaryNotRegisteredOrRemoved) { const std::string provisioning_status = "Provisioned on server: yes"; @@ -166,9 +165,8 @@ TEST_F(AktualizrInfoTest, PrintSecondaryNotRegisteredOrRemoved) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}, {secondary_ecu_serial, secondary_hw_id}}); db_storage_->storeEcuRegistered(); - db_storage_->storeMisconfiguredEcus( - {{secondary_ecu_serial_not_reg, secondary_hw_id_not_reg, EcuState::kNotRegistered}, - {secondary_ecu_serial_old, secondary_hw_id_old, EcuState::kOld}}); + db_storage_->saveMisconfiguredEcu({secondary_ecu_serial_not_reg, secondary_hw_id_not_reg, EcuState::kUnused}); + db_storage_->saveMisconfiguredEcu({secondary_ecu_serial_old, secondary_hw_id_old, EcuState::kOld}); aktualizr_info_process_.run(); ASSERT_FALSE(aktualizr_info_output.empty()); @@ -185,46 +183,46 @@ TEST_F(AktualizrInfoTest, PrintSecondaryNotRegisteredOrRemoved) { } /** - * Verifies aktualizr-info output of a root metadata from the images repository + * Verifies aktualizr-info output of a Root metadata from the Image repository * * Checks actions: * - * - [x] Print root metadata from images repository + * - [x] Print Root metadata from Image repository */ TEST_F(AktualizrInfoTest, PrintImageRootMetadata) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); db_storage_->storeEcuRegistered(); - Json::Value images_root_json; - images_root_json["key-001"] = "value-002"; + Json::Value image_root_json; + image_root_json["key-001"] = "value-002"; - std::string images_root = Utils::jsonToStr(images_root_json); - db_storage_->storeRoot(images_root, Uptane::RepositoryType::Image(), Uptane::Version(1)); - db_storage_->storeRoot(images_root, Uptane::RepositoryType::Director(), Uptane::Version(1)); + std::string image_root = Utils::jsonToStr(image_root_json); + db_storage_->storeRoot(image_root, 
Uptane::RepositoryType::Image(), Uptane::Version(1)); + db_storage_->storeRoot(image_root, Uptane::RepositoryType::Director(), Uptane::Version(1)); aktualizr_info_process_.run(std::vector{"--images-root"}); ASSERT_FALSE(aktualizr_info_output.empty()); - EXPECT_NE(aktualizr_info_output.find(images_root), std::string::npos); + EXPECT_NE(aktualizr_info_output.find(image_root), std::string::npos); } /** - * Verifies aktualizr-info output of targets metadata from the image repository + * Verifies aktualizr-info output of Targets metadata from the Image repository * * Checks actions: * - * - [x] Print targets metadata from images repository + * - [x] Print Targets metadata from Image repository */ TEST_F(AktualizrInfoTest, PrintImageTargetsMetadata) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); db_storage_->storeEcuRegistered(); - Json::Value images_root_json; - images_root_json["key-001"] = "value-002"; + Json::Value image_root_json; + image_root_json["key-001"] = "value-002"; - std::string images_root = Utils::jsonToStr(images_root_json); - db_storage_->storeRoot(images_root, Uptane::RepositoryType::Image(), Uptane::Version(1)); - db_storage_->storeRoot(images_root, Uptane::RepositoryType::Director(), Uptane::Version(1)); + std::string image_root = Utils::jsonToStr(image_root_json); + db_storage_->storeRoot(image_root, Uptane::RepositoryType::Image(), Uptane::Version(1)); + db_storage_->storeRoot(image_root, Uptane::RepositoryType::Director(), Uptane::Version(1)); Json::Value image_targets_json; image_targets_json["key-004"] = "value-005"; @@ -238,11 +236,11 @@ TEST_F(AktualizrInfoTest, PrintImageTargetsMetadata) { } /** - * Verifies aktualizr-info output of a root metadata from the director repository + * Verifies aktualizr-info output of a Root metadata from the Director repository * * Checks actions: * - * - [x] Print root metadata from director repository + * - [x] Print Root metadata from Director repository */ TEST_F(AktualizrInfoTest, 
PrintDirectorRootMetadata) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); @@ -261,11 +259,11 @@ TEST_F(AktualizrInfoTest, PrintDirectorRootMetadata) { } /** - * Verifies aktualizr-info output of targets' metadata from the director repository + * Verifies aktualizr-info output of Targets metadata from the Director repository * * Checks actions: * - * - [x] Print targets metadata from director repository + * - [x] Print Targets metadata from Director repository */ TEST_F(AktualizrInfoTest, PrintDirectorTargetsMetadata) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); @@ -289,11 +287,11 @@ TEST_F(AktualizrInfoTest, PrintDirectorTargetsMetadata) { } /** - * Verifies aktualizr-info output of the primary ECU keys + * Verifies aktualizr-info output of the Primary ECU keys * * Checks actions: * - * - [x] Print primary ECU keys + * - [x] Print Primary ECU keys * - [x] Print ECU public key * - [x] Print ECU private key */ @@ -301,18 +299,58 @@ TEST_F(AktualizrInfoTest, PrintPrimaryEcuKeys) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); db_storage_->storeEcuRegistered(); - const std::string public_key = "public-key-1dc766fe-136d-4c6c-bdf4-daa79c49b3c8"; - const std::string private_key = "private-key-5cb805f1-859f-48b1-b787-8055d39b6c5f"; + const std::string public_keyid = "c2a42c620f56698f343c6746efa6a145cf93f4ddbd4e7b7017fbe78003c73e2b"; + const std::string public_key = + "-----BEGIN PUBLIC KEY-----\n" + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxMhBei0MRQAEf3VtNa5T\n" + "/aa3l3r1ekMQ5Fh8eqj9SfQbuF1BgmjpYhV6NqZjqQiYbnpZWBEDJKqg9RL1D8rk\n" + "9ILSr7YGQDs34+Bt/4vmsZjghvex/N0tfxv85ckWmybiseZPXIwaCRx/B2QruXts\n" + "tUh3shfKOms2dWt7ZXP27mc66Qe8/aIf+gT4lL1zYammaGfBoNqj5/1HdguqM4aX\n" + "K/4g9fivqwEA4q4ejDheJJ8w8w4kUJGnPNi+GAgJHHX+lX68ZVgmiO/+uef453sd\n" + "Vwandii+Fw6B0monaGAYG0pQ3ZZ1Cgz5cAZGjL+P9eviDrgx4x7F2DDZHyfUNP3h\n" + "5wIDAQAB\n" + "-----END PUBLIC KEY-----\n"; + const std::string private_key = + 
"-----BEGIN RSA PRIVATE KEY-----\n" + "MIIEpAIBAAKCAQEAxMhBei0MRQAEf3VtNa5T/aa3l3r1ekMQ5Fh8eqj9SfQbuF1B\n" + "gmjpYhV6NqZjqQiYbnpZWBEDJKqg9RL1D8rk9ILSr7YGQDs34+Bt/4vmsZjghvex\n" + "/N0tfxv85ckWmybiseZPXIwaCRx/B2QruXtstUh3shfKOms2dWt7ZXP27mc66Qe8\n" + "/aIf+gT4lL1zYammaGfBoNqj5/1HdguqM4aXK/4g9fivqwEA4q4ejDheJJ8w8w4k\n" + "UJGnPNi+GAgJHHX+lX68ZVgmiO/+uef453sdVwandii+Fw6B0monaGAYG0pQ3ZZ1\n" + "Cgz5cAZGjL+P9eviDrgx4x7F2DDZHyfUNP3h5wIDAQABAoIBAE07s8c6CwjB2wIT\n" + "motpInn5hzEjB1m3HNgiiqixzsfJ0V9o6p8+gesHNvJgF9luEDW8O3i/JJatiYLm\n" + "r9xE69uzxPFF5eor0+HSYhncVOz7bZRLf0YZoRO0bmvZos++UVc1Z4yRSF6vGoRS\n" + "In8oHCCCksgJYkvPbI5lYwcMnqwuk50TBGAuGVPxamsCXhCETKJtclDX/ZMUmey2\n" + "psTqM76fjmzqhLLuSmurh+60VG3VCNueUVwrC/AW1xS07NzaQO28KZ/6AGFkXWWd\n" + "8Q6KSwKJ85qN4+qpsSKqNvzeva8OPWwWSFLBRRw8dwyvesmHUNncYeIReyM+nSMw\n" + "N0QkMgECgYEA7CS52/4K3y8coqkSSkeugRluSpCykd14YxvpyF1asq0MJcACpsUV\n" + "BJUWlqPAD9FM6ZvBNNrpDcV04YjDAzjLSNPN95TV7tS/eSrNqZ0Hd5lpYA0gVSq8\n" + "BQafuSlx/TTWIrreFc0v+eGq9WLHK6oPWDnGHgJbOYWEbn7WF858X4ECgYEA1VQ7\n" + "ZHrWtzAeJ9DohHUQNrz4LwseEu0Y+eqJ1PtxsX2eWW/gKa/4Ew4YUjOhD3ajcelf\n" + "ZcpzT/cdFk8Ya3zEHHKEU7ZMHKOPs0LpmFuYtxwOABXLanNIb/k9mvEkvTqIrYFf\n" + "QKxL2fC2VJiZCBDXeo2ImlUs6fgq1IsgckAN9WcCgYEAi2TKicAWbtSClMo0z8As\n" + "lGyMnFt57XzMecSaZfoldd+MkiQb7JHd7EyNfvK+hxfHzQZyMF8gv05VxmRSqW43\n" + "IZBVvtYOyuKu/Dl2Ga9mHwViHJ7i/SMyxcy5MDX04cD0vp+MRVZQAbNilWNvqqjC\n" + "UhQYjNJbQ0M7f3ZDrt3msQECgYEAoeOIJtppcx8a41BQA6Tqpv+Ev/6J1gcDuzRX\n" + "YL9oKi+QKYMS88/MTHmXz1nK0fdQVbOqZ47ZL0fyVOm1OGy4TnZBIV3oKJufA4S1\n" + "zJ9GJz8tCLeBZMkToZXdQGXbYZa3/iN9a5DVBxD67PvYthxByYj6r1QP/4YKyrzB\n" + "5LHjZeUCgYBFn5dKJ57ef+m0YelSf60Xa/ui5OodGmxgp9dC72WVsqTyePjQ8JSC\n" + "xRw2nRx80qFPGKwKeD7JO7nrPdCsgj41OQjIXgb2dTb+QDsSAAFcBSTIVPCa7Nb/\n" + "lbQDwseg8d8IrQyGvnMB6VDGt3rqd3UKt66h2PNRh13i0HYArfIAUQ==\n" + "-----END RSA PRIVATE KEY-----\n"; + db_storage_->storePrimaryKeys(public_key, private_key); aktualizr_info_process_.run({"--ecu-keys"}); ASSERT_FALSE(aktualizr_info_output.empty()); - 
EXPECT_NE(aktualizr_info_output.find("Public key:"), std::string::npos); - EXPECT_NE(aktualizr_info_output.find(public_key), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("Public key ID: " + public_keyid), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("Public key:\n" + public_key), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("Private key:\n" + private_key), std::string::npos); - EXPECT_NE(aktualizr_info_output.find("Private key:"), std::string::npos); - EXPECT_NE(aktualizr_info_output.find(private_key), std::string::npos); + aktualizr_info_process_.run({"--ecu-keyid"}); + ASSERT_FALSE(aktualizr_info_output.empty()); + EXPECT_NE(aktualizr_info_output.find(public_keyid), std::string::npos); aktualizr_info_process_.run({"--ecu-pub-key"}); ASSERT_FALSE(aktualizr_info_output.empty()); @@ -369,11 +407,11 @@ TEST_F(AktualizrInfoTest, PrintTlsCredentials) { } /** - * Verifies aktualizr-info output of the primary ECU's current and pending versions + * Verifies aktualizr-info output of the Primary ECU's current and pending versions * * Checks actions: * - * - [x] Print primary ECU current and pending versions + * - [x] Print Primary ECU's current and pending versions */ TEST_F(AktualizrInfoTest, PrintPrimaryEcuCurrentAndPendingVersions) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); @@ -384,26 +422,26 @@ TEST_F(AktualizrInfoTest, PrintPrimaryEcuCurrentAndPendingVersions) { Uptane::EcuMap ecu_map{{primary_ecu_serial, primary_hw_id}}; db_storage_->savePrimaryInstalledVersion( - {"update.bin", ecu_map, {{Uptane::Hash::Type::kSha256, current_ecu_version}}, 1, "corrid"}, + {"update.bin", ecu_map, {{Hash::Type::kSha256, current_ecu_version}}, 1, "corrid"}, InstalledVersionUpdateMode::kCurrent); db_storage_->savePrimaryInstalledVersion( - {"update-01.bin", ecu_map, {{Uptane::Hash::Type::kSha256, pending_ecu_version}}, 1, "corrid-01"}, + {"update-01.bin", ecu_map, {{Hash::Type::kSha256, pending_ecu_version}}, 1, 
"corrid-01"}, InstalledVersionUpdateMode::kPending); aktualizr_info_process_.run(); ASSERT_FALSE(aktualizr_info_output.empty()); - EXPECT_NE(aktualizr_info_output.find("Current primary ecu running version: " + current_ecu_version), + EXPECT_NE(aktualizr_info_output.find("Current Primary ECU running version: " + current_ecu_version), std::string::npos); - EXPECT_NE(aktualizr_info_output.find("Pending primary ecu version: " + pending_ecu_version), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("Pending Primary ECU version: " + pending_ecu_version), std::string::npos); } /** - * Verifies aktualizr-info output of the primary ECU's current and pending versions negative test + * Verifies aktualizr-info output of the Primary ECU's current and pending versions negative test * * Checks actions: * - * - [x] Print primary ECU current and pending versions + * - [x] Print Primary ECU's current and pending versions */ TEST_F(AktualizrInfoTest, PrintPrimaryEcuCurrentAndPendingVersionsNegative) { db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}}); @@ -418,31 +456,31 @@ TEST_F(AktualizrInfoTest, PrintPrimaryEcuCurrentAndPendingVersionsNegative) { EXPECT_NE(aktualizr_info_output.find(primary_ecu_serial.ToString()), std::string::npos); EXPECT_NE(aktualizr_info_output.find(primary_hw_id.ToString()), std::string::npos); - EXPECT_NE(aktualizr_info_output.find("No currently running version on primary ecu"), std::string::npos); - EXPECT_EQ(aktualizr_info_output.find("Pending primary ecu version:"), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("No currently running version on Primary ECU"), std::string::npos); + EXPECT_EQ(aktualizr_info_output.find("Pending Primary ECU version:"), std::string::npos); Uptane::EcuMap ecu_map{{primary_ecu_serial, primary_hw_id}}; db_storage_->savePrimaryInstalledVersion( - {"update-01.bin", ecu_map, {{Uptane::Hash::Type::kSha256, pending_ecu_version}}, 1, "corrid-01"}, + {"update-01.bin", ecu_map, 
{{Hash::Type::kSha256, pending_ecu_version}}, 1, "corrid-01"}, InstalledVersionUpdateMode::kPending); aktualizr_info_process_.run(); ASSERT_FALSE(aktualizr_info_output.empty()); - EXPECT_NE(aktualizr_info_output.find("No currently running version on primary ecu"), std::string::npos); - EXPECT_NE(aktualizr_info_output.find("Pending primary ecu version: " + pending_ecu_version), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("No currently running version on Primary ECU"), std::string::npos); + EXPECT_NE(aktualizr_info_output.find("Pending Primary ECU version: " + pending_ecu_version), std::string::npos); db_storage_->savePrimaryInstalledVersion( - {"update-01.bin", ecu_map, {{Uptane::Hash::Type::kSha256, pending_ecu_version}}, 1, "corrid-01"}, + {"update-01.bin", ecu_map, {{Hash::Type::kSha256, pending_ecu_version}}, 1, "corrid-01"}, InstalledVersionUpdateMode::kCurrent); aktualizr_info_process_.run(); ASSERT_FALSE(aktualizr_info_output.empty()); - // pending ecu version became the current now - EXPECT_NE(aktualizr_info_output.find("Current primary ecu running version: " + pending_ecu_version), + // pending ECU version became the current now + EXPECT_NE(aktualizr_info_output.find("Current Primary ECU running version: " + pending_ecu_version), std::string::npos); - EXPECT_EQ(aktualizr_info_output.find("Pending primary ecu version:"), std::string::npos); + EXPECT_EQ(aktualizr_info_output.find("Pending Primary ECU version:"), std::string::npos); } /** @@ -450,7 +488,7 @@ TEST_F(AktualizrInfoTest, PrintPrimaryEcuCurrentAndPendingVersionsNegative) { * * Checks actions: * - * - [x] Print secondary ECU current and pending versions + * - [x] Print Secondary ECU current and pending versions */ TEST_F(AktualizrInfoTest, PrintSecondaryEcuCurrentAndPendingVersions) { const Uptane::EcuSerial secondary_ecu_serial{"c6998d3e-2a68-4ac2-817e-4ea6ef87d21f"}; @@ -464,14 +502,13 @@ TEST_F(AktualizrInfoTest, PrintSecondaryEcuCurrentAndPendingVersions) { 
db_storage_->storeEcuRegistered(); Uptane::EcuMap ecu_map{{secondary_ecu_serial, secondary_hw_id}}; - db_storage_->saveInstalledVersion( - secondary_ecu_serial.ToString(), - {secondary_ecu_filename, ecu_map, {{Uptane::Hash::Type::kSha256, current_ecu_version}}, 1}, - InstalledVersionUpdateMode::kCurrent); + db_storage_->saveInstalledVersion(secondary_ecu_serial.ToString(), + {secondary_ecu_filename, ecu_map, {{Hash::Type::kSha256, current_ecu_version}}, 1}, + InstalledVersionUpdateMode::kCurrent); db_storage_->saveInstalledVersion( secondary_ecu_serial.ToString(), - {secondary_ecu_filename_update, ecu_map, {{Uptane::Hash::Type::kSha256, pending_ecu_version}}, 1}, + {secondary_ecu_filename_update, ecu_map, {{Hash::Type::kSha256, pending_ecu_version}}, 1}, InstalledVersionUpdateMode::kPending); aktualizr_info_process_.run(); @@ -482,7 +519,29 @@ TEST_F(AktualizrInfoTest, PrintSecondaryEcuCurrentAndPendingVersions) { EXPECT_NE(aktualizr_info_output.find("pending image hash: " + pending_ecu_version), std::string::npos); EXPECT_NE(aktualizr_info_output.find("pending image filename: " + secondary_ecu_filename_update), std::string::npos); - // negative test, no any installed images + // Add Secondary public key and test that too. 
+ const std::string secondary_key_raw = + "-----BEGIN PUBLIC KEY-----\n" + "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4R0QC/aI2375auDXdRM7\n" + "SQekXkGG72VmJxUXQmSmo8RiExkZWabJmrcGhqLMYGWfPNfUzxzMze3k30PAYdRK\n" + "TwxOERmIDSYy2lBclfjLskpQF/z3mwRNlDfh1OI9gLFR9BGF7oDd4s2yWPRhAL1c\n" + "hborUz1KeTv60kE26Wm/efmY/Kka4I0iR4YfOUOI7xFAs3ONYAPx19KvcXkIjTGT\n" + "BgdkSJUrlpuP0f2C8Tm8kCC923owB3ZxaYkmVYDmKar4CC5f8lf4eBrigkkC6ybb\n" + "m7ggeNCp38M1gOkSMdmH1vhMkgSRqMFegw4wdoxcda/sjLG8sRk6/al5+cBvFRdq\n" + "awIDAQAB\n" + "-----END PUBLIC KEY-----\n"; + const PublicKey secondary_key(secondary_key_raw, KeyType::kRSA2048); + db_storage_->saveSecondaryInfo(secondary_ecu_serial, "secondary-type", secondary_key); + + aktualizr_info_process_.run({"--secondary-keys"}); + ASSERT_FALSE(aktualizr_info_output.empty()); + + EXPECT_NE(aktualizr_info_output.find("public key ID: " + secondary_key.KeyId()), std::string::npos) + << aktualizr_info_output; + EXPECT_NE(aktualizr_info_output.find("public key:\n" + secondary_key_raw), std::string::npos) + << aktualizr_info_output; + + // negative test without any installed images db_storage_->clearInstalledVersions(); db_storage_->clearEcuSerials(); db_storage_->storeEcuSerials({{primary_ecu_serial, primary_hw_id}, {secondary_ecu_serial, secondary_hw_id}}); @@ -544,7 +603,7 @@ TEST_F(AktualizrInfoTest, PrintDelegations) { } }; - // aktualizer-info won't print anything if a director root metadata are not stored in the DB + // aktualizr-info won't print anything if Director Root metadata are not stored in the DB db_storage_->storeRoot(Utils::jsonToStr(Json::Value()), Uptane::RepositoryType::Director(), Uptane::Version(1)); // case 0: no delegations in the DB @@ -580,11 +639,11 @@ TEST_F(AktualizrInfoTest, PrintDelegations) { } /** - * Verifies aktualizr-info output of snapshot metadata from image repository + * Verifies aktualizr-info output of Snapshot metadata from Image repository * * Checks actions: * - * - [x] Print snapshot metadata from image 
repository + * - [x] Print Snapshot metadata from Image repository */ TEST_F(AktualizrInfoTest, PrintImageSnapshotMetadata) { Json::Value director_root_json; @@ -596,21 +655,21 @@ TEST_F(AktualizrInfoTest, PrintImageSnapshotMetadata) { meta_snapshot["signed"]["_type"] = "Snapshot"; meta_snapshot["signed"]["expires"] = "2038-01-19T03:14:06Z"; meta_snapshot["signed"]["version"] = "2"; - std::string images_snapshot = Utils::jsonToStr(meta_snapshot); - db_storage_->storeNonRoot(images_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); + std::string image_snapshot = Utils::jsonToStr(meta_snapshot); + db_storage_->storeNonRoot(image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); aktualizr_info_process_.run({"--images-snapshot"}); ASSERT_FALSE(aktualizr_info_output.empty()); - EXPECT_NE(aktualizr_info_output.find(images_snapshot), std::string::npos); + EXPECT_NE(aktualizr_info_output.find(image_snapshot), std::string::npos); } /** - * Verifies aktualizr-info output of a timestamp metadata from the image repository + * Verifies aktualizr-info output of Timestamp metadata from the Image repository * * Checks actions: * - * - [x] Print timestamp metadata from image repository + * - [x] Print Timestamp metadata from Image repository */ TEST_F(AktualizrInfoTest, PrintImageTimestampMetadata) { Json::Value director_root_json; @@ -621,13 +680,13 @@ TEST_F(AktualizrInfoTest, PrintImageTimestampMetadata) { Json::Value meta_timestamp; meta_timestamp["signed"]["_type"] = "Timestamp"; meta_timestamp["signed"]["expires"] = "2038-01-19T03:14:06Z"; - std::string images_timestamp = Utils::jsonToStr(meta_timestamp); - db_storage_->storeNonRoot(images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); + std::string image_timestamp = Utils::jsonToStr(meta_timestamp); + db_storage_->storeNonRoot(image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); aktualizr_info_process_.run({"--images-timestamp"}); 
ASSERT_FALSE(aktualizr_info_output.empty()); - EXPECT_NE(aktualizr_info_output.find(images_timestamp), std::string::npos); + EXPECT_NE(aktualizr_info_output.find(image_timestamp), std::string::npos); } /** diff --git a/src/aktualizr_info/main.cc b/src/aktualizr_info/main.cc index 70c67fba3b..8b09b35d85 100644 --- a/src/aktualizr_info/main.cc +++ b/src/aktualizr_info/main.cc @@ -1,16 +1,19 @@ -#include -#include #include #include #include +#include +#include + +#include "libaktualizr/packagemanagerfactory.h" + #include "aktualizr_info_config.h" #include "logging/logging.h" -#include "package_manager/packagemanagerfactory.h" #include "storage/invstorage.h" #include "storage/sql_utils.h" +#include "utilities/aktualizr_version.h" -namespace po = boost::program_options; +namespace bpo = boost::program_options; static int loadAndPrintDelegations(const std::shared_ptr &storage) { std::vector > delegations; @@ -21,7 +24,7 @@ static int loadAndPrintDelegations(const std::shared_ptr &storage) { return EXIT_FAILURE; } - if (delegations.size() > 0) { + if (!delegations.empty()) { for (const auto &delegation : delegations) { std::cout << delegation.first << ": " << delegation.second << std::endl; } @@ -31,50 +34,82 @@ static int loadAndPrintDelegations(const std::shared_ptr &storage) { return EXIT_SUCCESS; } +void checkInfoOptions(const bpo::options_description &description, const bpo::variables_map &vm) { + if (vm.count("help") != 0) { + std::cout << description << '\n'; + exit(EXIT_SUCCESS); + } + if (vm.count("version") != 0) { + std::cout << "Current aktualizr-info version is: " << aktualizr_version() << "\n"; + exit(EXIT_SUCCESS); + } +} + int main(int argc, char **argv) { - po::options_description desc("aktualizr-info command line options"); + bpo::options_description all("aktualizr-info command line options"); + bpo::options_description description("aktualizr-info command line options"); + bpo::options_description hidden("deprecated options"); // clang-format off - 
desc.add_options() + description.add_options() ("help,h", "print usage") - ("config,c", po::value >()->composing(), "configuration file or directory") - ("loglevel", po::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") + ("version,v", "Current aktualizr version") + ("config,c", bpo::value >()->composing(), "configuration file or directory") + ("loglevel", bpo::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("name-only", "Only output device name (intended for scripting). Cannot be used in combination with other arguments.") ("tls-creds", "Outputs TLS credentials") ("tls-root-ca", "Outputs TLS Root CA") ("tls-cert", "Outputs TLS client certificate") ("tls-prv-key", "Output TLS client private key") - ("ecu-keys", "Outputs UPTANE keys") - ("ecu-pub-key", "Outputs UPTANE public key") - ("ecu-prv-key", "Outputs UPTANE private key") - ("images-root", "Outputs root.json from images repo") - ("images-timestamp", "Outputs timestamp.json from images repo") - ("images-snapshot", "Outputs snapshot.json from images repo") - ("images-target", "Outputs targets.json from images repo") - ("delegation", "Outputs metadata of image repo targets' delegations") - ("director-root", "Outputs root.json from director repo") - ("director-target", "Outputs targets.json from director repo") + ("ecu-keys", "Outputs Primary's Uptane keys") + ("ecu-keyid", "Outputs Primary's Uptane public key ID") + ("ecu-pub-key", "Outputs Primary's Uptane public key") + ("ecu-prv-key", "Outputs Primary's Uptane private key") + ("secondary-keys", "Outputs Secondaries' Uptane public keys") + ("image-root", "Outputs root.json from Image repo") + ("image-timestamp", "Outputs timestamp.json from Image repo") + ("image-snapshot", "Outputs snapshot.json from Image repo") + ("image-targets", "Outputs targets.json from Image repo") + ("delegation", "Outputs metadata of Image repo Targets' delegations") + ("director-root", "Outputs root.json from Director repo") + 
("director-targets", "Outputs targets.json from Director repo") ("allow-migrate", "Opens database in read/write mode to make possible to migrate database if needed") ("wait-until-provisioned", "Outputs metadata when device already provisioned"); + // Support old names and variations due to common typos. + hidden.add_options() + ("images-root", "Outputs root.json from Image repo") + ("images-timestamp", "Outputs timestamp.json from Image repo") + ("images-snapshot", "Outputs snapshot.json from Image repo") + ("images-target", "Outputs targets.json from Image repo") + ("images-targets", "Outputs targets.json from Image repo") + ("image-target", "Outputs targets.json from Image repo") + ("director-target", "Outputs targets.json from Director repo"); // clang-format on try { - po::variables_map vm; - po::basic_parsed_options parsed_options = po::command_line_parser(argc, argv).options(desc).run(); - po::store(parsed_options, vm); - po::notify(vm); - if (vm.count("help") != 0) { - std::cout << desc << '\n'; - exit(EXIT_SUCCESS); + all.add(description).add(hidden); + bpo::variables_map vm; + bpo::basic_parsed_options parsed_options = bpo::command_line_parser(argc, argv).options(all).run(); + bpo::store(parsed_options, vm); + checkInfoOptions(description, vm); + bpo::notify(vm); + std::vector unregistered_options = + bpo::collect_unrecognized(parsed_options.options, bpo::include_positional); + if (vm.count("help") == 0 && !unregistered_options.empty()) { + std::cout << description << "\n"; + exit(EXIT_FAILURE); } - if (vm.count("loglevel") == 0u) { + logger_init(); + if (vm.count("loglevel") == 0U) { logger_set_enable(false); } AktualizrInfoConfig config(vm); + bool secondary_db = false; + bool readonly = true; - if (vm.count("allow-migrate") != 0u) { + if (vm.count("allow-migrate") != 0U) { readonly = false; } @@ -107,47 +142,55 @@ int main(int argc, char **argv) { storage = INvStorage::newStorage(config.storage, readonly); } - if (!storage->loadDeviceId(&device_id)) { - 
std::cout << "Couldn't load device ID" << std::endl; - } else { + bool deviceid_loaded = false; + if (storage->loadDeviceId(&device_id)) { + deviceid_loaded = true; // Early return if only printing device ID. - if (vm.count("name-only") != 0u) { + if (vm.count("name-only") != 0U) { std::cout << device_id << std::endl; return EXIT_SUCCESS; } } registered = registered || storage->loadEcuRegistered(); - has_metadata = has_metadata || storage->loadLatestRoot(&director_root, Uptane::RepositoryType::Director()); + std::string temp; + has_metadata = has_metadata || storage->loadLatestRoot(&director_root, Uptane::RepositoryType::Director()) || + storage->loadLatestRoot(&temp, Uptane::RepositoryType::Image()); - // TLS credentials - if (vm.count("tls-creds") != 0u) { + bool tlscred_loaded = false; + { std::string ca; std::string cert; std::string pkey; storage->loadTlsCreds(&ca, &cert, &pkey); - std::cout << "Root CA certificate:" << std::endl << ca << std::endl; - std::cout << "Client certificate:" << std::endl << cert << std::endl; - std::cout << "Client private key:" << std::endl << pkey << std::endl; - cmd_trigger = true; + if (!ca.empty() || !cert.empty() || !pkey.empty()) { + tlscred_loaded = true; + } + // TLS credentials + if (vm.count("tls-creds") != 0U) { + std::cout << "Root CA certificate:" << std::endl << ca << std::endl; + std::cout << "Client certificate:" << std::endl << cert << std::endl; + std::cout << "Client private key:" << std::endl << pkey << std::endl; + cmd_trigger = true; + } } - if (vm.count("tls-root-ca") != 0u) { + if (vm.count("tls-root-ca") != 0U) { std::string ca; storage->loadTlsCa(&ca); std::cout << ca << std::endl; cmd_trigger = true; } - if (vm.count("tls-cert") != 0u) { + if (vm.count("tls-cert") != 0U) { std::string cert; storage->loadTlsCert(&cert); std::cout << cert << std::endl; cmd_trigger = true; } - if (vm.count("tls-prv-key") != 0u) { + if (vm.count("tls-prv-key") != 0U) { std::string key; storage->loadTlsPkey(&key); std::cout << 
key << std::endl; @@ -155,33 +198,59 @@ int main(int argc, char **argv) { } // ECU credentials - if (vm.count("ecu-keys") != 0u) { - std::string priv; - std::string pub; + bool ecukeys_loaded = false; + std::string priv; + std::string pub; + storage->loadPrimaryKeys(&pub, &priv); + if (!pub.empty() && !priv.empty()) { + ecukeys_loaded = true; + } - storage->loadPrimaryKeys(&pub, &priv); - std::cout << "Public key:" << std::endl << pub << std::endl; - std::cout << "Private key:" << std::endl << priv << std::endl; - cmd_trigger = true; + if (vm.count("ecu-keys") != 0U) { + if (!ecukeys_loaded) { + std::cout << "Failed to load Primary ECU keys!" << std::endl; + } else { + // TODO: probably won't work with p11. + PublicKey pubkey(pub, config.uptane.key_type); + std::cout << "Public key ID: " << pubkey.KeyId() << std::endl; + std::cout << "Public key:" << std::endl << pub << std::endl; + std::cout << "Private key:" << std::endl << priv << std::endl; + cmd_trigger = true; + } } - if (vm.count("ecu-pub-key") != 0u) { - std::string key; - storage->loadPrimaryPublic(&key); - std::cout << key << std::endl; - return EXIT_SUCCESS; + if (vm.count("ecu-keyid") != 0U) { + if (!ecukeys_loaded) { + std::cout << "Failed to load Primary ECU keys!" << std::endl; + } else { + // TODO: probably won't work with p11. + PublicKey pubkey(pub, config.uptane.key_type); + std::cout << pubkey.KeyId() << std::endl; + cmd_trigger = true; + } } - if (vm.count("ecu-prv-key") != 0u) { - std::string key; - storage->loadPrimaryPrivate(&key); - std::cout << key << std::endl; - cmd_trigger = true; + if (vm.count("ecu-pub-key") != 0U) { + if (!ecukeys_loaded) { + std::cout << "Failed to load Primary ECU keys!" << std::endl; + } else { + std::cout << pub << std::endl; + cmd_trigger = true; + } + } + + if (vm.count("ecu-prv-key") != 0U) { + if (!ecukeys_loaded) { + std::cout << "Failed to load Primary ECU keys!" 
<< std::endl; + } else { + std::cout << priv << std::endl; + cmd_trigger = true; + } } // An arguments which depend on metadata. std::string msg_metadata_fail = "Metadata is not available"; - if (vm.count("images-root") != 0u) { + if (vm.count("image-root") != 0U || vm.count("images-root") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -192,7 +261,8 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("images-target") != 0u) { + if (vm.count("image-targets") != 0U || vm.count("image-target") != 0U || vm.count("images-targets") != 0U || + vm.count("images-target") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -203,7 +273,7 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("delegation") != 0u) { + if (vm.count("delegation") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -212,7 +282,7 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("director-root") != 0u) { + if (vm.count("director-root") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -221,7 +291,7 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("director-target") != 0u) { + if (vm.count("director-targets") != 0U || vm.count("director-target") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -232,7 +302,7 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("images-snapshot") != 0u) { + if (vm.count("image-snapshot") != 0U || vm.count("images-snapshot") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { @@ -243,7 +313,7 @@ int main(int argc, char **argv) { cmd_trigger = true; } - if (vm.count("images-timestamp") != 0u) { + if (vm.count("image-timestamp") != 0U || vm.count("images-timestamp") != 0U) { if (!has_metadata) { std::cout << msg_metadata_fail << std::endl; } else { 
@@ -258,31 +328,52 @@ int main(int argc, char **argv) { return EXIT_SUCCESS; } + if (!deviceid_loaded && !tlscred_loaded && ecukeys_loaded) { + secondary_db = true; + } + // Print general information if user does not provide any argument. - std::cout << "Device ID: " << device_id << std::endl; + if (!secondary_db) { + if (!deviceid_loaded) { + std::cout << "Couldn't load device ID" << std::endl; + } else { + std::cout << "Device ID: " << device_id << std::endl; + } + } + + std::string ecu_name = secondary_db ? "Secondary" : "Primary"; EcuSerials serials; if (!storage->loadEcuSerials(&serials)) { std::cout << "Couldn't load ECU serials" << std::endl; - } else if (serials.size() == 0) { - std::cout << "Primary serial is not found" << std::endl; + } else if (serials.empty()) { + std::cout << ecu_name << " serial is not found" << std::endl; } else { - std::cout << "Primary ecu serial ID: " << serials[0].first << std::endl; - std::cout << "Primary ecu hardware ID: " << serials[0].second << std::endl; + std::cout << ecu_name << " ECU serial ID: " << serials[0].first << std::endl; + std::cout << ecu_name << " ECU hardware ID: " << serials[0].second << std::endl; } if (serials.size() > 1) { + std::vector info; + if (vm.count("secondary-keys") != 0U) { + storage->loadSecondariesInfo(&info); + if (info.empty()) { + std::cout << "Failed to load Secondary info!" 
<< std::endl; + } + } + auto it = serials.begin() + 1; std::cout << "Secondaries:\n"; int secondary_number = 1; for (; it != serials.end(); ++it) { - std::cout << secondary_number++ << ") serial ID: " << it->first << std::endl; + const Uptane::EcuSerial serial = it->first; + std::cout << secondary_number++ << ") serial ID: " << serial << std::endl; std::cout << " hardware ID: " << it->second << std::endl; boost::optional current_version; boost::optional pending_version; auto load_installed_version_res = - storage->loadInstalledVersions((it->first).ToString(), ¤t_version, &pending_version); + storage->loadInstalledVersions(serial.ToString(), ¤t_version, &pending_version); if (!load_installed_version_res || (!current_version && !pending_version)) { std::cout << " no details about installed nor pending images\n"; @@ -296,13 +387,26 @@ int main(int argc, char **argv) { std::cout << " pending image filename: " << pending_version->filename() << "\n"; } } + + if (vm.count("secondary-keys") != 0U) { + auto f = std::find_if(info.cbegin(), info.cend(), + [&serial](const SecondaryInfo &i) { return serial == i.serial; }); + if (f == info.cend()) { + std::cout << " Failed to find matching Secondary info!" << std::endl; + } else { + std::cout << " public key ID: " << f->pub_key.KeyId() << std::endl; + std::cout << " public key:" << std::endl << f->pub_key.Value() << std::endl; + } + } } + } else if (vm.count("secondary-keys") != 0U) { + std::cout << "Failed to load Secondary data!" 
<< std::endl; } std::vector misconfigured_ecus; storage->loadMisconfiguredEcus(&misconfigured_ecus); - if (misconfigured_ecus.size() != 0u) { - std::cout << "Removed or not registered ecus:" << std::endl; + if (!misconfigured_ecus.empty()) { + std::cout << "Removed or unregistered ECUs (deprecated):" << std::endl; std::vector::const_iterator it; for (it = misconfigured_ecus.begin(); it != misconfigured_ecus.end(); ++it) { std::cout << " '" << it->serial << "' with hardware_id '" << it->hardware_id << "' " @@ -310,17 +414,19 @@ int main(int argc, char **argv) { } } - std::cout << "Provisioned on server: " << (registered ? "yes" : "no") << std::endl; + if (!secondary_db) { + std::cout << "Provisioned on server: " << (registered ? "yes" : "no") << std::endl; + } std::cout << "Fetched metadata: " << (has_metadata ? "yes" : "no") << std::endl; - auto pacman = PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); + auto pacman = PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr); Uptane::Target current_target = pacman->getCurrent(); if (current_target.IsValid()) { - std::cout << "Current primary ecu running version: " << current_target.sha256Hash() << std::endl; + std::cout << "Current " << ecu_name << " ECU running version: " << current_target.sha256Hash() << std::endl; } else { - std::cout << "No currently running version on primary ecu" << std::endl; + std::cout << "No currently running version on " << ecu_name << " ECU" << std::endl; } std::vector installed_versions; @@ -328,11 +434,10 @@ int main(int argc, char **argv) { storage->loadPrimaryInstalledVersions(nullptr, &pending); if (!!pending) { - std::cout << "Pending primary ecu version: " << pending->sha256Hash() << std::endl; + std::cout << "Pending " << ecu_name << " ECU version: " << pending->sha256Hash() << std::endl; } - } catch (const po::error &o) { - std::cout << o.what() << std::endl; - std::cout << desc; + } catch (const bpo::error 
&o) { + std::cout << o.what() << std::endl << description; return EXIT_FAILURE; } catch (const std::exception &exc) { diff --git a/src/aktualizr_lite/CMakeLists.txt b/src/aktualizr_lite/CMakeLists.txt deleted file mode 100644 index 82f5602b7f..0000000000 --- a/src/aktualizr_lite/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -set(AKTUALIZR_LITE_SRC main.cc helpers.cc) -set(AKTUALIZR_LITE_HEADERS helpers.h) - -if(BUILD_OSTREE) -add_executable(aktualizr-lite ${AKTUALIZR_LITE_SRC}) -target_link_libraries(aktualizr-lite aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) - -install(TARGETS aktualizr-lite RUNTIME DESTINATION bin COMPONENT aktualizr-lite) - -set(TEST_SOURCES test_lite.sh) - -add_dependencies(build_tests aktualizr-lite) - -add_test(test_aktualizr-lite - ${CMAKE_CURRENT_SOURCE_DIR}/test_lite.sh - ${CMAKE_BINARY_DIR}/src/aktualizr_lite/aktualizr-lite - ${CMAKE_BINARY_DIR}/src/uptane_generator/uptane-generator - ${PROJECT_SOURCE_DIR}/tests - ${RUN_VALGRIND} -) -add_library(t_lite-mock SHARED ostree_mock.cc) -add_aktualizr_test(NAME lite-helpers SOURCES helpers.cc helpers_test.cc - ARGS ${PROJECT_BINARY_DIR}/ostree_repo) -set_tests_properties(test_lite-helpers PROPERTIES - ENVIRONMENT LD_PRELOAD=$) - -endif(BUILD_OSTREE) - -aktualizr_source_file_checks(main.cc ${AKTUALIZR_LITE_SRC} ${AKTUALIZR_LITE_HEADERS} helpers_test.cc ostree_mock.cc) -# vim: set tabstop=4 shiftwidth=4 expandtab: diff --git a/src/aktualizr_lite/helpers.cc b/src/aktualizr_lite/helpers.cc deleted file mode 100644 index 1c2e54d909..0000000000 --- a/src/aktualizr_lite/helpers.cc +++ /dev/null @@ -1,58 +0,0 @@ -#include "helpers.h" - -#include -#include - -#include "package_manager/ostreemanager.h" - -static void finalizeIfNeeded(INvStorage &storage, PackageConfig &config) { - boost::optional pending_version; - storage.loadInstalledVersions("", nullptr, &pending_version); - - if (!!pending_version) { - GObjectUniquePtr sysroot_smart = OstreeManager::LoadSysroot(config.sysroot); - OstreeDeployment 
*booted_deployment = ostree_sysroot_get_booted_deployment(sysroot_smart.get()); - if (booted_deployment == nullptr) { - throw std::runtime_error("Could not get booted deployment in " + config.sysroot.string()); - } - std::string current_hash = ostree_deployment_get_csum(booted_deployment); - - const Uptane::Target &target = *pending_version; - if (current_hash == target.sha256Hash()) { - LOG_INFO << "Marking target install complete for: " << target; - storage.saveInstalledVersion("", target, InstalledVersionUpdateMode::kCurrent); - } - } -} - -LiteClient::LiteClient(Config &config_in) : config(std::move(config_in)) { - std::string pkey; - storage = INvStorage::newStorage(config.storage); - storage->importData(config.import); - - EcuSerials ecu_serials; - if (!storage->loadEcuSerials(&ecu_serials)) { - // Set a "random" serial so we don't get warning messages. - std::string serial = config.provision.primary_ecu_serial; - std::string hwid = config.provision.primary_ecu_hardware_id; - if (hwid.empty()) { - hwid = Utils::getHostname(); - } - if (serial.empty()) { - boost::uuids::uuid tmp = boost::uuids::random_generator()(); - serial = boost::uuids::to_string(tmp); - } - ecu_serials.emplace_back(Uptane::EcuSerial(serial), Uptane::HardwareIdentifier(hwid)); - storage->storeEcuSerials(ecu_serials); - } - - auto http_client = std::make_shared(); - auto bootloader = std::make_shared(config.bootloader, *storage); - auto report_queue = std::make_shared(config, http_client); - - KeyManager keys(storage, config.keymanagerConfig()); - keys.copyCertsToCurl(*http_client); - - primary = std::make_shared(config, storage, http_client, bootloader, report_queue); - finalizeIfNeeded(*storage, config.pacman); -} diff --git a/src/aktualizr_lite/helpers.h b/src/aktualizr_lite/helpers.h deleted file mode 100644 index 41ca0393ca..0000000000 --- a/src/aktualizr_lite/helpers.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef AKTUALIZR_LITE_HELPERS -#define AKTUALIZR_LITE_HELPERS - -#include - 
-#include - -#include "primary/sotauptaneclient.h" - -struct Version { - std::string raw_ver; - Version(std::string version) : raw_ver(std::move(version)) {} - - bool operator<(const Version& other) { return strverscmp(raw_ver.c_str(), other.raw_ver.c_str()) < 0; } -}; - -struct LiteClient { - LiteClient(Config& config_in); - - Config config; - std::shared_ptr storage; - std::shared_ptr primary; -}; - -#endif // AKTUALIZR_LITE_HELPERS diff --git a/src/aktualizr_lite/helpers_test.cc b/src/aktualizr_lite/helpers_test.cc deleted file mode 100644 index 245fd9b400..0000000000 --- a/src/aktualizr_lite/helpers_test.cc +++ /dev/null @@ -1,73 +0,0 @@ -#include - -#include "helpers.h" - -static boost::filesystem::path test_sysroot; - -TEST(version, bad_versions) { - ASSERT_TRUE(Version("bar") < Version("foo")); - ASSERT_TRUE(Version("1.bar") < Version("2foo")); - ASSERT_TRUE(Version("1..0") < Version("1.1")); - ASSERT_TRUE(Version("1.-1") < Version("1.1")); - ASSERT_TRUE(Version("1.*bad #text") < Version("1.1")); // ord('*') < ord('1') -} - -TEST(version, good_versions) { - ASSERT_TRUE(Version("1.0.1") < Version("1.0.1.1")); - ASSERT_TRUE(Version("1.0.1") < Version("1.0.2")); - ASSERT_TRUE(Version("0.9") < Version("1.0.1")); - ASSERT_TRUE(Version("1.0.0.0") < Version("1.0.0.1")); - ASSERT_TRUE(Version("1") < Version("1.0.0.1")); - ASSERT_TRUE(Version("1.9.0") < Version("1.10")); -} - -// Ensure we finalize an install if completed -TEST(helpers, lite_client_finalize) { - TemporaryDirectory cfg_dir; - - Config config; - config.storage.path = cfg_dir.Path(); - config.pacman.type = PackageManager::kOstree; - config.pacman.sysroot = test_sysroot; - std::shared_ptr storage = INvStorage::newStorage(config.storage); - - Json::Value target_json; - target_json["hashes"]["sha256"] = "deadbeef"; - target_json["custom"]["targetFormat"] = "OSTREE"; - target_json["length"] = 0; - Uptane::Target target("test-finalize", target_json); - - setenv("OSTREE_HASH", "deadbeef", 1); - 
storage->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kPending); - ASSERT_TRUE(target.MatchHash(LiteClient(config).primary->getCurrent().hashes()[0])); - - config = Config(); // Create a new config since LiteClient std::move's it - config.storage.path = cfg_dir.Path(); - config.pacman.type = PackageManager::kOstree; - config.pacman.sysroot = test_sysroot; - - setenv("OSTREE_HASH", "abcd", 1); - storage->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kPending); - ASSERT_FALSE(target.MatchHash(LiteClient(config).primary->getCurrent().hashes()[0])); -} - -#ifndef __NO_MAIN__ -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - - if (argc != 2) { - std::cerr << "Error: " << argv[0] << " requires the path to an OSTree sysroot.\n"; - return EXIT_FAILURE; - } - - TemporaryDirectory temp_dir; - // Utils::copyDir doesn't work here. Complaints about non existent symlink path - int r = system((std::string("cp -r ") + argv[1] + std::string(" ") + temp_dir.PathString()).c_str()); - if (r != 0) { - return -1; - } - test_sysroot = (temp_dir.Path() / "ostree_repo").string(); - - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/aktualizr_lite/main.cc b/src/aktualizr_lite/main.cc deleted file mode 100644 index b7158f8d0b..0000000000 --- a/src/aktualizr_lite/main.cc +++ /dev/null @@ -1,260 +0,0 @@ -#include -#include - -#include -#include - -#include "config/config.h" -#include "helpers.h" - -#include "utilities/aktualizr_version.h" - -namespace bpo = boost::program_options; - -static void log_info_target(const std::string &prefix, const Config &config, const Uptane::Target &t) { - auto name = t.filename(); - if (t.custom_version().length() > 0) { - name = t.custom_version(); - } - LOG_INFO << prefix + name << "\tsha256:" << t.sha256Hash(); - if (config.pacman.type == PackageManager::kOstreeDockerApp) { - bool shown = false; - auto apps = t.custom_data()["docker_apps"]; - for (Json::ValueIterator i = apps.begin(); i != 
apps.end(); ++i) { - if (!shown) { - shown = true; - LOG_INFO << "\tDocker Apps:"; - } - if ((*i).isObject() && (*i).isMember("filename")) { - LOG_INFO << "\t\t" << i.key().asString() << " -> " << (*i)["filename"].asString(); - } else { - LOG_ERROR << "\t\tInvalid custom data for docker-app: " << i.key().asString(); - } - } - } -} - -static int status_main(LiteClient &client, const bpo::variables_map &unused) { - (void)unused; - auto target = client.primary->getCurrent(); - - if (target.MatchTarget(Uptane::Target::Unknown())) { - LOG_INFO << "No active deployment found"; - } else { - auto name = target.filename(); - if (target.custom_version().length() > 0) { - name = target.custom_version(); - } - log_info_target("Active image is: ", client.config, target); - } - return 0; -} - -static int list_main(LiteClient &client, const bpo::variables_map &unused) { - (void)unused; - Uptane::HardwareIdentifier hwid(client.config.provision.primary_ecu_hardware_id); - - LOG_INFO << "Refreshing target metadata"; - if (!client.primary->updateImagesMeta()) { - LOG_WARNING << "Unable to update latest metadata, using local copy"; - if (!client.primary->checkImagesMetaOffline()) { - LOG_ERROR << "Unable to use local copy of TUF data"; - return 1; - } - } - - LOG_INFO << "Updates available to " << hwid << ":"; - for (auto &t : client.primary->allTargets()) { - for (auto const &it : t.hardwareIds()) { - if (it == hwid) { - log_info_target("", client.config, t); - break; - } - } - } - return 0; -} - -static std::unique_ptr find_target(const std::shared_ptr &client, - Uptane::HardwareIdentifier &hwid, const std::string &version) { - std::unique_ptr rv; - if (!client->updateImagesMeta()) { - LOG_WARNING << "Unable to update latest metadata, using local copy"; - if (!client->checkImagesMetaOffline()) { - LOG_ERROR << "Unable to use local copy of TUF data"; - throw std::runtime_error("Unable to find update"); - } - } - - bool find_latest = (version == "latest"); - std::unique_ptr latest = 
nullptr; - for (auto &t : client->allTargets()) { - for (auto const &it : t.hardwareIds()) { - if (it == hwid) { - if (find_latest) { - if (latest == nullptr || Version(latest->custom_version()) < Version(t.custom_version())) { - latest = std_::make_unique(t); - } - } else if (version == t.filename() || version == t.custom_version()) { - return std_::make_unique(t); - } - } - } - } - if (find_latest && latest != nullptr) { - return latest; - } - throw std::runtime_error("Unable to find update"); -} - -static int do_update(LiteClient &client, Uptane::Target &target) { - if (!client.primary->downloadImage(target).first) { - return 1; - } - - if (client.primary->VerifyTarget(target) != TargetStatus::kGood) { - LOG_ERROR << "Downloaded target is invalid"; - return 1; - } - - auto iresult = client.primary->PackageInstall(target); - if (iresult.result_code.num_code == data::ResultCode::Numeric::kNeedCompletion) { - LOG_INFO << "Update complete. Please reboot the device to activate"; - client.storage->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kPending); - } else if (iresult.result_code.num_code == data::ResultCode::Numeric::kOk) { - client.storage->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kCurrent); - } else { - LOG_ERROR << "Unable to install update: " << iresult.description; - return 1; - } - LOG_INFO << iresult.description; - return 0; -} - -static int update_main(LiteClient &client, const bpo::variables_map &variables_map) { - Uptane::HardwareIdentifier hwid(client.config.provision.primary_ecu_hardware_id); - - std::string version("latest"); - if (variables_map.count("update-name") > 0) { - version = variables_map["update-name"].as(); - } - LOG_INFO << "Finding " << version << " to update to..."; - auto target = find_target(client.primary, hwid, version); - LOG_INFO << "Updating to: " << *target; - return do_update(client, *target); -} - -struct SubCommand { - const char *name; - int (*main)(LiteClient &, const 
bpo::variables_map &); -}; -static SubCommand commands[] = { - {"status", status_main}, - {"list", list_main}, - {"update", update_main}, -}; - -void check_info_options(const bpo::options_description &description, const bpo::variables_map &vm) { - if (vm.count("help") != 0 || vm.count("command") == 0) { - std::cout << description << '\n'; - exit(EXIT_SUCCESS); - } - if (vm.count("version") != 0) { - std::cout << "Current aktualizr version is: " << aktualizr_version() << "\n"; - exit(EXIT_SUCCESS); - } -} - -bpo::variables_map parse_options(int argc, char *argv[]) { - std::string subs("Command to execute: "); - for (size_t i = 0; i < sizeof(commands) / sizeof(SubCommand); i++) { - if (i != 0) { - subs += ", "; - } - subs += commands[i].name; - } - bpo::options_description description("aktualizr-lite command line options"); - // clang-format off - // Try to keep these options in the same order as Config::updateFromCommandLine(). - // The first three are commandline only. - description.add_options() - ("help,h", "print usage") - ("version,v", "Current aktualizr version") - ("config,c", bpo::value >()->composing(), "configuration file or directory") - ("loglevel", bpo::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") - ("repo-server", bpo::value(), "url of the uptane repo repository") - ("ostree-server", bpo::value(), "url of the ostree repository") - ("primary-ecu-hardware-id", bpo::value(), "hardware ID of primary ecu") - ("update-name", bpo::value(), "optional name of the update when running \"update\". 
default=latest") - ("command", bpo::value(), subs.c_str()); - // clang-format on - - // consider the first positional argument as the aktualizr run mode - bpo::positional_options_description pos; - pos.add("command", 1); - - bpo::variables_map vm; - std::vector unregistered_options; - try { - bpo::basic_parsed_options parsed_options = - bpo::command_line_parser(argc, argv).options(description).positional(pos).allow_unregistered().run(); - bpo::store(parsed_options, vm); - check_info_options(description, vm); - bpo::notify(vm); - unregistered_options = bpo::collect_unrecognized(parsed_options.options, bpo::exclude_positional); - if (vm.count("help") == 0 && !unregistered_options.empty()) { - std::cout << description << "\n"; - exit(EXIT_FAILURE); - } - } catch (const bpo::required_option &ex) { - // print the error and append the default commandline option description - std::cout << ex.what() << std::endl << description; - exit(EXIT_FAILURE); - } catch (const bpo::error &ex) { - check_info_options(description, vm); - - // log boost error - LOG_ERROR << "boost command line option error: " << ex.what(); - - // print the error message to the standard output too, as the user provided - // a non-supported commandline option - std::cout << ex.what() << '\n'; - - // set the returnValue, thereby ctest will recognize - // that something went wrong - exit(EXIT_FAILURE); - } - - return vm; -} - -int main(int argc, char *argv[]) { - logger_init(isatty(1) == 1); - logger_set_threshold(boost::log::trivial::info); - - bpo::variables_map commandline_map = parse_options(argc, argv); - - int r = EXIT_FAILURE; - try { - if (geteuid() != 0) { - LOG_WARNING << "\033[31mRunning as non-root and may not work as expected!\033[0m\n"; - } - - Config config(commandline_map); - config.storage.uptane_metadata_path = BasedPath(config.storage.path / "metadata"); - LOG_DEBUG << "Current directory: " << boost::filesystem::current_path().string(); - - std::string cmd = 
commandline_map["command"].as(); - for (size_t i = 0; i < sizeof(commands) / sizeof(SubCommand); i++) { - if (cmd == commands[i].name) { - LiteClient client(config); - return commands[i].main(client, commandline_map); - } - } - throw bpo::invalid_option_value(cmd); - r = EXIT_SUCCESS; - } catch (const std::exception &ex) { - LOG_ERROR << ex.what(); - } - return r; -} diff --git a/src/aktualizr_lite/ostree_mock.cc b/src/aktualizr_lite/ostree_mock.cc deleted file mode 100644 index ea42afacc0..0000000000 --- a/src/aktualizr_lite/ostree_mock.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include -#include - -extern "C" OstreeDeployment *ostree_sysroot_get_booted_deployment(OstreeSysroot *self) { - (void)self; - static OstreeDeployment *deployment; - - if (deployment != nullptr) { - return deployment; - } - - const char *hash = getenv("OSTREE_HASH"); - deployment = ostree_deployment_new(0, "dummy-os", hash, 1, hash, 1); - return deployment; -} - -extern "C" const char *ostree_deployment_get_csum(OstreeDeployment *self) { - (void)self; - return getenv("OSTREE_HASH"); -} diff --git a/src/aktualizr_lite/test_lite.sh b/src/aktualizr_lite/test_lite.sh deleted file mode 100755 index ccdeafa1b8..0000000000 --- a/src/aktualizr_lite/test_lite.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env bash -set -e - -build_dir=$(pwd) -aklite=$1 -uptane_gen_bin=$2 -tests_dir=$3 -#valgrind=$4 -valgrind="" -mock_ostree=$(dirname $aklite)/libt_lite-mock.so - -dest_dir=$(mktemp -d) - -cleanup() { - echo "cleaning up temp dir" - rm -rf "$dest_dir" - if [ -n "$pid" ] ; then - echo "killing webserver" - kill $pid - fi -} -trap cleanup EXIT - -uptane_gen() { - $uptane_gen_bin --repotype image --path "$dest_dir" "$@" -} - -add_target() { - custom_json="${dest_dir}/custom.json" - name=$1 - if [ -n "$2" ] ; then - sha=$2 - else - sha=$(echo $name | sha256sum | cut -f1 -d\ ) - fi - cat >$custom_json <$sota_dir/sota.toml < #include -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include 
"libaktualizr/config.h" #include "logging/logging.h" -#include "primary/aktualizr.h" #include "primary/aktualizr_helpers.h" #include "secondary.h" #include "utilities/aktualizr_version.h" @@ -16,7 +16,7 @@ namespace bpo = boost::program_options; -void check_info_options(const bpo::options_description &description, const bpo::variables_map &vm) { +void checkInfoOptions(const bpo::options_description &description, const bpo::variables_map &vm) { if (vm.count("help") != 0) { std::cout << description << '\n'; exit(EXIT_SUCCESS); @@ -27,7 +27,7 @@ void check_info_options(const bpo::options_description &description, const bpo:: } } -bpo::variables_map parse_options(int argc, char *argv[]) { +bpo::variables_map parseOptions(int argc, char **argv) { bpo::options_description description("aktualizr command line options"); // clang-format off // Try to keep these options in the same order as Config::updateFromCommandLine(). @@ -38,15 +38,14 @@ bpo::variables_map parse_options(int argc, char *argv[]) { ("config,c", bpo::value >()->composing(), "configuration file or directory") ("loglevel", bpo::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("run-mode", bpo::value(), "run mode of aktualizr: full, once, campaign_check, campaign_accept, campaign_decline, campaign_postpone, check, download, or install") - ("tls-server", bpo::value(), "url of device gateway") - ("repo-server", bpo::value(), "url of the uptane repo repository") - ("director-server", bpo::value(), "url of the uptane director repository") - ("ostree-server", bpo::value(), "url of the ostree repository") - ("primary-ecu-serial", bpo::value(), "serial number of primary ecu") - ("primary-ecu-hardware-id", bpo::value(), "hardware ID of primary ecu") - ("secondary-configs-dir", bpo::value(), "directory containing secondary ECU configuration files") - ("secondary-config-file", bpo::value(), "secondary ECUs configuration file") - ("campaign-id", bpo::value(), "id of the campaign to act on"); + 
("tls-server", bpo::value(), "URL of device gateway") + ("repo-server", bpo::value(), "URL of the Uptane Image repository") + ("director-server", bpo::value(), "URL of the Uptane Director repository") + ("primary-ecu-serial", bpo::value(), "serial number of Primary ECU") + ("primary-ecu-hardware-id", bpo::value(), "hardware ID of Primary ECU") + ("secondary-config-file", bpo::value(), "Secondary ECUs configuration file") + ("campaign-id", bpo::value(), "ID of the campaign to act on") + ("hwinfo-file", bpo::value(), "custom hardware information JSON file"); // clang-format on // consider the first positional argument as the aktualizr run mode @@ -59,7 +58,7 @@ bpo::variables_map parse_options(int argc, char *argv[]) { bpo::basic_parsed_options parsed_options = bpo::command_line_parser(argc, argv).options(description).positional(pos).allow_unregistered().run(); bpo::store(parsed_options, vm); - check_info_options(description, vm); + checkInfoOptions(description, vm); bpo::notify(vm); unregistered_options = bpo::collect_unrecognized(parsed_options.options, bpo::exclude_positional); if (vm.count("help") == 0 && !unregistered_options.empty()) { @@ -71,7 +70,7 @@ bpo::variables_map parse_options(int argc, char *argv[]) { std::cout << ex.what() << std::endl << description; exit(EXIT_FAILURE); } catch (const bpo::error &ex) { - check_info_options(description, vm); + checkInfoOptions(description, vm); // log boost error LOG_ERROR << "boost command line option error: " << ex.what(); @@ -88,11 +87,15 @@ bpo::variables_map parse_options(int argc, char *argv[]) { return vm; } -void process_event(const std::shared_ptr &event) { - if (event->isTypeOf()) { - // Do nothing; libaktualizr already logs it. - } else if (event->variant == "UpdateCheckComplete") { +void processEvent(const std::shared_ptr &event) { + if (event->isTypeOf() || event->variant == "UpdateCheckComplete") { // Do nothing; libaktualizr already logs it. 
+ } else if (event->variant == "AllDownloadsComplete") { + const auto *downloads_complete = dynamic_cast(event.get()); + LOG_INFO << "got " << event->variant << " event with status: " << downloads_complete->result.status; + } else if (event->variant == "AllInstallsComplete") { + const auto *installs_complete = dynamic_cast(event.get()); + LOG_INFO << "got " << event->variant << " event with status: " << installs_complete->result.dev_report.result_code; } else { LOG_INFO << "got " << event->variant << " event"; } @@ -102,7 +105,7 @@ int main(int argc, char *argv[]) { logger_init(); logger_set_threshold(boost::log::trivial::info); - bpo::variables_map commandline_map = parse_options(argc, argv); + bpo::variables_map commandline_map = parseOptions(argc, argv); LOG_INFO << "Aktualizr version " << aktualizr_version() << " starting"; @@ -117,17 +120,18 @@ int main(int argc, char *argv[]) { LOG_DEBUG << "Current directory: " << boost::filesystem::current_path().string(); Aktualizr aktualizr(config); - std::function event)> f_cb = process_event; + std::function event)> f_cb = processEvent; boost::signals2::scoped_connection conn; conn = aktualizr.SetSignalHandler(f_cb); if (!config.uptane.secondary_config_file.empty()) { - if (boost::filesystem::exists(config.uptane.secondary_config_file)) { + try { Primary::initSecondaries(aktualizr, config.uptane.secondary_config_file); - } else { - LOG_WARNING << "The specified secondary config file does not exist: " << config.uptane.secondary_config_file - << "\nProceed further without secondary(ies)"; + } catch (const std::exception &e) { + LOG_ERROR << "Failed to initialize Secondaries: " << e.what(); + LOG_ERROR << "Exiting..."; + return EXIT_FAILURE; } } @@ -142,6 +146,16 @@ int main(int argc, char *argv[]) { SigHandler::signal(SIGINT); SigHandler::signal(SIGTERM); + if (commandline_map.count("hwinfo-file") != 0) { + auto file = commandline_map["hwinfo-file"].as(); + auto hwinfo = Utils::parseJSONFile(file); + if (hwinfo.empty()) { 
+ LOG_ERROR << file << " is not a valid JSON file"; + return EXIT_FAILURE; + } + aktualizr.SetCustomHardwareInfo(hwinfo); + } + std::string run_mode; if (commandline_map.count("run-mode") != 0) { run_mode = commandline_map["run-mode"].as(); @@ -174,7 +188,8 @@ int main(int argc, char *argv[]) { try { aktualizr.RunForever().get(); } catch (const std::exception &ex) { - LOG_ERROR << ex.what(); + LOG_ERROR << "Aktualizr::RunForever exiting:" << ex.what(); + return EXIT_FAILURE; } LOG_DEBUG << "Aktualizr daemon exiting..."; diff --git a/src/aktualizr_primary/primary_secondary_registration_test.cc b/src/aktualizr_primary/primary_secondary_registration_test.cc new file mode 100644 index 0000000000..40b6352187 --- /dev/null +++ b/src/aktualizr_primary/primary_secondary_registration_test.cc @@ -0,0 +1,134 @@ +#include + +#include "httpfake.h" +#include "libaktualizr/aktualizr.h" +#include "secondary.h" +#include "uptane_test_common.h" +#include "utilities/utils.h" + +boost::filesystem::path fake_meta_dir; + +/* This tests that a device that had an IP Secondary will still find it after + * recent changes, even if it does not connect when the device starts. Note that + * this is only supported for a single IP Secondary. 
*/ +TEST(PrimarySecondaryReg, SecondariesMigration) { + const Uptane::EcuSerial primary_serial{"p_serial"}; + const Uptane::EcuSerial secondary_serial{"s_serial"}; + const Uptane::HardwareIdentifier primary_hwid{"p_hwid"}; + const Uptane::HardwareIdentifier secondary_hwid{"s_hwid"}; + + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), "noupdates", fake_meta_dir); + const auto& url = http->tls_server; + + Config conf("tests/config/basic.toml"); + conf.uptane.director_server = url + "/director"; + conf.uptane.repo_server = url + "/repo"; + conf.provision.server = url; + conf.provision.primary_ecu_serial = primary_serial.ToString(); + conf.provision.primary_ecu_hardware_id = primary_hwid.ToString(); + conf.storage.path = temp_dir.Path(); + conf.import.base_path = temp_dir.Path() / "import"; + conf.tls.server = url; + conf.bootloader.reboot_sentinel_dir = temp_dir.Path(); + const boost::filesystem::path sec_conf_path = temp_dir / "s_config.json"; + conf.uptane.secondary_config_file = sec_conf_path; + + auto storage = INvStorage::newStorage(conf.storage); + Json::Value sec_conf; + + // Prepare storage the "old" way (without the secondary_ecus table): + storage->storeDeviceId("device"); + storage->storeEcuSerials({{primary_serial, primary_hwid}, {secondary_serial, secondary_hwid}}); + storage->storeEcuRegistered(); + + // Also test backwards compatibility with verification_type here. Don't set + // it but expect it to be set to Full. + sec_conf["IP"]["secondary_wait_port"] = 9030; + sec_conf["IP"]["secondary_wait_timeout"] = 1; + sec_conf["IP"]["secondaries"] = Json::arrayValue; + sec_conf["IP"]["secondaries"][0]["addr"] = "127.0.0.1:9061"; + Utils::writeFile(sec_conf_path, sec_conf); + + { + // Confirm that the fields from the secondary_ecus table are empty. 
+ std::vector secs_info; + storage->loadSecondariesInfo(&secs_info); + EXPECT_EQ(secs_info.size(), 1); + EXPECT_EQ(secs_info[0].serial.ToString(), secondary_serial.ToString()); + EXPECT_EQ(secs_info[0].type, ""); + EXPECT_EQ(secs_info[0].extra, ""); + } + + { + // Verify that aktualizr can still start if it can't connect to its + // Secondary. This will migrate the Secondary. + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::initSecondaries(aktualizr, sec_conf_path); + aktualizr.Initialize(); + aktualizr.CheckUpdates().get(); + + std::vector secs_info; + storage->loadSecondariesInfo(&secs_info); + EXPECT_EQ(secs_info.size(), 1); + EXPECT_EQ(secs_info[0].serial.ToString(), secondary_serial.ToString()); + EXPECT_EQ(secs_info[0].type, "IP"); + EXPECT_EQ(secs_info[0].extra, R"({"ip":"127.0.0.1","port":9061,"verification_type":"Full"})"); + } + + { + // Try again (again without connecting) to verify that the Secondary is + // correctly found in the storage. + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::initSecondaries(aktualizr, sec_conf_path); + aktualizr.Initialize(); + aktualizr.CheckUpdates().get(); + + std::vector secs_info; + storage->loadSecondariesInfo(&secs_info); + EXPECT_EQ(secs_info.size(), 1); + EXPECT_EQ(secs_info[0].serial.ToString(), secondary_serial.ToString()); + EXPECT_EQ(secs_info[0].type, "IP"); + EXPECT_EQ(secs_info[0].extra, R"({"ip":"127.0.0.1","port":9061,"verification_type":"Full"})"); + } +} + +/* + * Register Virtual Secondaries via json configuration. + * Reject multiple Secondaries with the same serial. 
+ */ +TEST(PrimarySecondaryReg, VirtualSecondary) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), "noupdates", fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + // This should fail because TestAktualizr automatically adds the default + // Secondary created in makeTestConfig. + EXPECT_THROW(Primary::initSecondaries(aktualizr, conf.uptane.secondary_config_file), std::exception); + + boost::filesystem::remove(conf.uptane.secondary_config_file); + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "serial2", "hwid2"); + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "serial3", "hwid3"); + Primary::initSecondaries(aktualizr, conf.uptane.secondary_config_file); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "secondary_ecu_serial", "serial2", "serial3"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + + TemporaryDirectory tmp_dir; + fake_meta_dir = tmp_dir.Path(); + MetaFake meta_fake(fake_meta_dir); + + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/aktualizr_primary/secondary.cc b/src/aktualizr_primary/secondary.cc index 8f007ad90d..5beca88916 100644 --- a/src/aktualizr_primary/secondary.cc +++ b/src/aktualizr_primary/secondary.cc @@ -2,30 +2,36 @@ #include #include #include -#include +#include +#include +#include #include -#include #include "ipuptanesecondary.h" +#include "logging/logging.h" #include "secondary.h" #include "secondary_config.h" +#include "utilities/utils.h" namespace Primary { -using Secondaries = std::vector>; -using SecondaryFactoryRegistry = std::unordered_map>; +using Secondaries = std::vector>; +using 
SecondaryFactoryRegistry = + std::unordered_map>; -static Secondaries createIPSecondaries(const IPSecondariesConfig& config); +static Secondaries createIPSecondaries(const IPSecondariesConfig& config, Aktualizr& aktualizr); +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) static SecondaryFactoryRegistry sec_factory_registry = { {IPSecondariesConfig::Type, - [](const SecondaryConfig& config) { + [](const SecondaryConfig& config, Aktualizr& aktualizr) { auto ip_sec_cgf = dynamic_cast(config); - return createIPSecondaries(ip_sec_cgf); + return createIPSecondaries(ip_sec_cgf, aktualizr); }}, {VirtualSecondaryConfig::Type, - [](const SecondaryConfig& config) { + [](const SecondaryConfig& config, Aktualizr& aktualizr) { + (void)aktualizr; auto virtual_sec_cgf = dynamic_cast(config); return Secondaries({std::make_shared(virtual_sec_cgf)}); }}, @@ -34,8 +40,8 @@ static SecondaryFactoryRegistry sec_factory_registry = { // } }; -static Secondaries createSecondaries(const SecondaryConfig& config) { - return (sec_factory_registry.at(config.type()))(config); +static Secondaries createSecondaries(const SecondaryConfig& config, Aktualizr& aktualizr) { + return (sec_factory_registry.at(config.type()))(config, aktualizr); } void initSecondaries(Aktualizr& aktualizr, const boost::filesystem::path& config_file) { @@ -47,31 +53,33 @@ void initSecondaries(Aktualizr& aktualizr, const boost::filesystem::path& config for (auto& config : secondary_configs) { try { - LOG_INFO << "Creating " << config->type() << " secondaries..."; - Secondaries secondaries = createSecondaries(*config); + LOG_INFO << "Initializing " << config->type() << " Secondaries..."; + Secondaries secondaries = createSecondaries(*config, aktualizr); for (const auto& secondary : secondaries) { - LOG_INFO << "Adding Secondary to Aktualizr." 
- << "HW_ID: " << secondary->getHwId() << " Serial: " << secondary->getSerial(); + LOG_INFO << "Adding Secondary with ECU serial: " << secondary->getSerial() + << " with hardware ID: " << secondary->getHwId(); aktualizr.AddSecondary(secondary); } } catch (const std::exception& exc) { - LOG_ERROR << "Failed to initialize a secondary: " << exc.what(); - LOG_ERROR << "Continue with initialization of the remaining secondaries, if any left."; - // otherwise rethrow the exception + LOG_ERROR << "Failed to initialize a Secondary: " << exc.what(); + throw; } } } class SecondaryWaiter { public: - SecondaryWaiter(uint16_t wait_port, int timeout_s, Secondaries& secondaries) - : endpoint_{boost::asio::ip::tcp::v4(), wait_port}, + SecondaryWaiter(Aktualizr& aktualizr, uint16_t wait_port, int timeout_s, Secondaries& secondaries) + : aktualizr_(aktualizr), + endpoint_{boost::asio::ip::tcp::v4(), wait_port}, timeout_{static_cast(timeout_s)}, timer_{io_context_}, - connected_secondaries_(secondaries) {} + connected_secondaries_{secondaries} {} - void addSecoondary(const std::string& ip, uint16_t port) { secondaries_to_wait_for_.insert(key(ip, port)); } + void addSecondary(const std::string& ip, uint16_t port, VerificationType verification_type) { + secondaries_to_wait_for_.insert({key(ip, port), verification_type}); + } void wait() { if (secondaries_to_wait_for_.empty()) { @@ -81,9 +89,11 @@ class SecondaryWaiter { timer_.expires_from_now(timeout_); timer_.async_wait([&](const boost::system::error_code& error_code) { if (!!error_code) { - LOG_ERROR << "Wait for secondaries has failed: " << error_code; + LOG_ERROR << "Wait for Secondaries has failed: " << error_code; + throw std::runtime_error("Error while waiting for IP Secondaries"); } else { - LOG_ERROR << "Timeout while waiting for secondaries: " << error_code; + LOG_ERROR << "Timeout while waiting for Secondaries: " << error_code; + throw std::runtime_error("Timeout while waiting for IP Secondaries"); } io_context_.stop(); }); 
@@ -93,7 +103,7 @@ class SecondaryWaiter { private: void accept() { - LOG_INFO << "Waiting for connection from " << secondaries_to_wait_for_.size() << " secondaries..."; + LOG_INFO << "Waiting for connection from " << secondaries_to_wait_for_.size() << " Secondaries..."; acceptor_.async_accept(con_socket_, boost::bind(&SecondaryWaiter::connectionHdlr, this, boost::asio::placeholders::error)); } @@ -102,33 +112,45 @@ class SecondaryWaiter { if (!error_code) { auto sec_ip = con_socket_.remote_endpoint().address().to_string(); auto sec_port = con_socket_.remote_endpoint().port(); + auto it = secondaries_to_wait_for_.find(key(sec_ip, sec_port)); + if (it == secondaries_to_wait_for_.end()) { + LOG_INFO << "Unexpected connection from a Secondary: (" << sec_ip << ":" << sec_port << ")"; + return; + } - LOG_INFO << "Accepted connection from a secondary: (" << sec_ip << ":" << sec_port << ")"; + LOG_INFO << "Accepted connection from a Secondary: (" << sec_ip << ":" << sec_port << ")"; try { - auto sec_creation_res = Uptane::IpUptaneSecondary::create(sec_ip, sec_port, con_socket_.native_handle()); - if (sec_creation_res.first) { - connected_secondaries_.push_back(sec_creation_res.second); + auto secondary = Uptane::IpUptaneSecondary::create(sec_ip, sec_port, it->second, con_socket_.native_handle()); + if (secondary) { + connected_secondaries_.push_back(secondary); + // set ip/port in the db so that we can match everything later + Json::Value d; + d["ip"] = sec_ip; + d["port"] = sec_port; + d["verification_type"] = Uptane::VerificationTypeToString(it->second); + aktualizr_.SetSecondaryData(secondary->getSerial(), Utils::jsonToCanonicalStr(d)); } } catch (const std::exception& exc) { - LOG_ERROR << "Failed to initialize a secondary: " << exc.what(); + LOG_ERROR << "Failed to initialize a Secondary: " << exc.what(); } con_socket_.shutdown(boost::asio::ip::tcp::socket::shutdown_both); con_socket_.close(); - secondaries_to_wait_for_.erase(key(sec_ip, sec_port)); + 
secondaries_to_wait_for_.erase(it); if (!secondaries_to_wait_for_.empty()) { accept(); } else { io_context_.stop(); } } else { - LOG_ERROR << "Failed to accept connection from a secondary"; + LOG_ERROR << "Failed to accept connection from a Secondary"; } } static std::string key(const std::string& ip, uint16_t port) { return (ip + std::to_string(port)); } - private: + Aktualizr& aktualizr_; + boost::asio::io_service io_context_; boost::asio::ip::tcp::endpoint endpoint_; boost::asio::ip::tcp::acceptor acceptor_{io_context_, endpoint_}; @@ -137,23 +159,80 @@ class SecondaryWaiter { boost::asio::deadline_timer timer_; Secondaries& connected_secondaries_; - std::unordered_set secondaries_to_wait_for_; + std::unordered_map secondaries_to_wait_for_; }; -static Secondaries createIPSecondaries(const IPSecondariesConfig& config) { +// Four options for each Secondary: +// 1. Secondary is configured and stored: nothing to do. +// 2. Secondary is configured but not stored: it must be new. Try to connect to get information and store it. This will +// cause re-registration. +// 3. Same as 2 but cannot connect: abort. +// 4. Secondary is stored but not configured: it must have been removed. Skip it. This will cause re-registration. +static Secondaries createIPSecondaries(const IPSecondariesConfig& config, Aktualizr& aktualizr) { Secondaries result; - SecondaryWaiter sec_waiter{config.secondaries_wait_port, config.secondaries_timeout_s, result}; + SecondaryWaiter sec_waiter{aktualizr, config.secondaries_wait_port, config.secondaries_timeout_s, result}; + auto secondaries_info = aktualizr.GetSecondaries(); + + for (const auto& cfg : config.secondaries_cfg) { + SecondaryInterface::Ptr secondary; + const SecondaryInfo* info = nullptr; + + // Try to match the configured Secondaries to stored Secondaries. 
+ auto f = std::find_if(secondaries_info.cbegin(), secondaries_info.cend(), [&cfg](const SecondaryInfo& i) { + Json::Value d = Utils::parseJSON(i.extra); + // Don't match on verification_type; it wasn't there originally and is + // allowed to change. + return d["ip"] == cfg.ip && d["port"] == cfg.port; + }); - for (auto& ip_sec_cfg : config.secondaries_cfg) { - auto sec_creation_res = Uptane::IpUptaneSecondary::connectAndCreate(ip_sec_cfg.ip, ip_sec_cfg.port); - if (sec_creation_res.first) { - result.push_back(sec_creation_res.second); + if (f == secondaries_info.cend() && config.secondaries_cfg.size() == 1 && secondaries_info.size() == 1 && + secondaries_info[0].extra.empty()) { + // /!\ backward compatibility: if we have just one Secondary in the old + // storage format (before we had the secondary_ecus table) and the + // configuration, migrate it to the new format. + info = &secondaries_info[0]; + Json::Value d; + d["ip"] = cfg.ip; + d["port"] = cfg.port; + d["verification_type"] = Uptane::VerificationTypeToString(cfg.verification_type); + aktualizr.SetSecondaryData(info->serial, Utils::jsonToCanonicalStr(d)); + LOG_INFO << "Migrated a single IP Secondary to new storage format."; + } else if (f == secondaries_info.cend()) { + // Secondary was not found in storage; it must be new. 
+ secondary = Uptane::IpUptaneSecondary::connectAndCreate(cfg.ip, cfg.port, cfg.verification_type); + if (secondary == nullptr) { + LOG_DEBUG << "Could not connect to IP Secondary at " << cfg.ip << ":" << cfg.port + << "; now trying to wait for it."; + sec_waiter.addSecondary(cfg.ip, cfg.port, cfg.verification_type); + } else { + result.push_back(secondary); + // set ip/port in the db so that we can match everything later + Json::Value d; + d["ip"] = cfg.ip; + d["port"] = cfg.port; + d["verification_type"] = Uptane::VerificationTypeToString(cfg.verification_type); + aktualizr.SetSecondaryData(secondary->getSerial(), Utils::jsonToCanonicalStr(d)); + } + continue; } else { - sec_waiter.addSecoondary(ip_sec_cfg.ip, ip_sec_cfg.port); + // The configured Secondary was found in storage. + info = &(*f); + } + + if (secondary == nullptr) { + secondary = Uptane::IpUptaneSecondary::connectAndCheck(cfg.ip, cfg.port, cfg.verification_type, info->serial, + info->hw_id, info->pub_key); + if (secondary == nullptr) { + throw std::runtime_error("Unable to connect to or verify IP Secondary at " + cfg.ip + ":" + + std::to_string(cfg.port)); + } } + + result.push_back(secondary); } sec_waiter.wait(); + return result; } diff --git a/src/aktualizr_primary/secondary.h b/src/aktualizr_primary/secondary.h index 4142720fb6..ad9aaf8a91 100644 --- a/src/aktualizr_primary/secondary.h +++ b/src/aktualizr_primary/secondary.h @@ -1,9 +1,9 @@ #ifndef SECONDARY_H_ #define SECONDARY_H_ -#include +#include -#include "primary/aktualizr.h" +#include "libaktualizr/aktualizr.h" namespace Primary { diff --git a/src/aktualizr_primary/secondary_config.cc b/src/aktualizr_primary/secondary_config.cc index f889526c91..6acba69568 100644 --- a/src/aktualizr_primary/secondary_config.cc +++ b/src/aktualizr_primary/secondary_config.cc @@ -1,13 +1,17 @@ +#include #include #include #include +#include + #include "logging/logging.h" #include "secondary_config.h" +#include "utilities/utils.h" namespace Primary { -const 
char* const IPSecondariesConfig::Type = "IP"; +constexpr const char* const IPSecondariesConfig::Type; SecondaryConfigParser::Configs SecondaryConfigParser::parse_config_file(const boost::filesystem::path& config_file) { if (!boost::filesystem::exists(config_file)) { @@ -15,10 +19,10 @@ SecondaryConfigParser::Configs SecondaryConfigParser::parse_config_file(const bo } auto cfg_file_ext = boost::filesystem::extension(config_file); - std::shared_ptr cfg_parser; + std::unique_ptr cfg_parser; if (cfg_file_ext == ".json") { - cfg_parser = std::make_shared(config_file); + cfg_parser = std_::make_unique(config_file); } else { // add your format of configuration file + implement SecondaryConfigParser specialization throw std::invalid_argument("Unsupported type of config format: " + cfg_file_ext); } @@ -34,8 +38,8 @@ config file example "secondaries_wait_port": 9040, "secondaries_wait_timeout": 20, "secondaries": [ - {"addr": "127.0.0.1:9031"} - {"addr": "127.0.0.1:9032"} + {"addr": "127.0.0.1:9031", "verification_type": "Full"} + {"addr": "127.0.0.1:9032", "verification_type": "Tuf"} ] }, "socketcan": { @@ -53,22 +57,21 @@ config file example JsonConfigParser::JsonConfigParser(const boost::filesystem::path& config_file) { assert(boost::filesystem::exists(config_file)); std::ifstream json_file_stream(config_file.string()); - Json::Reader json_reader; + std::string errs; - if (!json_reader.parse(json_file_stream, root_, false)) { - throw std::invalid_argument("Failed to parse secondary config file: " + config_file.string() + ": " + - json_reader.getFormattedErrorMessages()); + if (!Json::parseFromStream(Json::CharReaderBuilder(), json_file_stream, &root_, &errs)) { + throw std::invalid_argument("Failed to parse secondary config file: " + config_file.string() + ": " + errs); } } SecondaryConfigParser::Configs JsonConfigParser::parse() { Configs res_sec_cfg; - for (Json::ValueIterator it = root_.begin(); it != root_.end(); ++it) { + for (auto it = root_.begin(); it != 
root_.end(); ++it) { std::string secondary_type = it.key().asString(); if (sec_cfg_factory_registry_.find(secondary_type) == sec_cfg_factory_registry_.end()) { - LOG_ERROR << "Unsupported type of sescondary config was found: `" << secondary_type + LOG_ERROR << "Unsupported type of secondary config was found: `" << secondary_type << "`. Ignoring it and continuing with parsing of other secondary configs"; } else { (sec_cfg_factory_registry_.at(secondary_type))(res_sec_cfg, *it); @@ -99,7 +102,12 @@ void JsonConfigParser::createIPSecondariesCfg(Configs& configs, const Json::Valu for (const auto& secondary : secondaries) { auto addr = getIPAndPort(secondary[IPSecondaryConfig::AddrField].asString()); - IPSecondaryConfig sec_cfg{addr.first, addr.second}; + // Backwards compatibility: assume full verification if not specified. + VerificationType vtype = VerificationType::kFull; + if (secondary.isMember(IPSecondaryConfig::VerificationField)) { + vtype = Uptane::VerificationTypeFromString(secondary[IPSecondaryConfig::VerificationField].asString()); + } + IPSecondaryConfig sec_cfg{addr.first, addr.second, vtype}; LOG_INFO << " found IP secondary config: " << sec_cfg; resultant_cfg->secondaries_cfg.push_back(sec_cfg); diff --git a/src/aktualizr_primary/secondary_config.h b/src/aktualizr_primary/secondary_config.h index 68f31ef44f..06940c24b3 100644 --- a/src/aktualizr_primary/secondary_config.h +++ b/src/aktualizr_primary/secondary_config.h @@ -1,10 +1,13 @@ #ifndef SECONDARY_CONFIG_H_ #define SECONDARY_CONFIG_H_ -#include -#include +#include #include +#include +#include + +#include "libaktualizr/types.h" #include "primary/secondary_config.h" #include "virtualsecondary.h" @@ -13,22 +16,24 @@ namespace Primary { class IPSecondaryConfig { public: static constexpr const char* const AddrField{"addr"}; + static constexpr const char* const VerificationField{"verification_type"}; - IPSecondaryConfig(std::string addr_ip, uint16_t addr_port) : ip(std::move(addr_ip)), port(addr_port) 
{} + IPSecondaryConfig(std::string addr_ip, uint16_t addr_port, VerificationType verification_type_in) + : ip(std::move(addr_ip)), port(addr_port), verification_type(verification_type_in) {} friend std::ostream& operator<<(std::ostream& os, const IPSecondaryConfig& cfg) { - os << "(addr: " << cfg.ip << ":" << cfg.port << ")"; + os << "(addr: " << cfg.ip << ":" << cfg.port << " verification_type: " << cfg.verification_type << ")"; return os; } - public: const std::string ip; const uint16_t port; + const VerificationType verification_type; }; class IPSecondariesConfig : public SecondaryConfig { public: - static const char* const Type; + static constexpr const char* const Type{"IP"}; static constexpr const char* const PortField{"secondaries_wait_port"}; static constexpr const char* const TimeoutField{"secondaries_wait_timeout"}; static constexpr const char* const SecondariesField{"secondaries"}; @@ -41,7 +46,6 @@ class IPSecondariesConfig : public SecondaryConfig { return os; } - public: const uint16_t secondaries_wait_port; const int secondaries_timeout_s; std::vector secondaries_cfg; @@ -52,15 +56,19 @@ class SecondaryConfigParser { using Configs = std::vector>; static Configs parse_config_file(const boost::filesystem::path& config_file); + SecondaryConfigParser() = default; virtual ~SecondaryConfigParser() = default; + SecondaryConfigParser(const SecondaryConfigParser&) = default; + SecondaryConfigParser(SecondaryConfigParser&&) = default; + SecondaryConfigParser& operator=(const SecondaryConfigParser&) = default; + SecondaryConfigParser& operator=(SecondaryConfigParser&&) = default; - // TODO implement iterator instead of parse virtual Configs parse() = 0; }; class JsonConfigParser : public SecondaryConfigParser { public: - JsonConfigParser(const boost::filesystem::path& config_file); + explicit JsonConfigParser(const boost::filesystem::path& config_file); Configs parse() override; @@ -69,7 +77,6 @@ class JsonConfigParser : public SecondaryConfigParser { static 
void createVirtualSecondariesCfg(Configs& configs, const Json::Value& json_virtual_sec_cfg); // add here a factory method for another type of secondary config - private: using SecondaryConfigFactoryRegistry = std::unordered_map>; SecondaryConfigFactoryRegistry sec_cfg_factory_registry_ = { diff --git a/src/aktualizr_secondary/CMakeLists.txt b/src/aktualizr_secondary/CMakeLists.txt index 2b677ad5ae..e7e353a8ba 100644 --- a/src/aktualizr_secondary/CMakeLists.txt +++ b/src/aktualizr_secondary/CMakeLists.txt @@ -3,73 +3,91 @@ set(AKTUALIZR_SECONDARY_SRC main.cc) set(AKTUALIZR_SECONDARY_LIB_SRC aktualizr_secondary.cc aktualizr_secondary_config.cc - aktualizr_secondary_common.cc - socket_server.cc + aktualizr_secondary_file.cc + msg_handler.cc + secondary_tcp_server.cc + update_agent_file.cc ) -add_library(aktualizr_secondary_static_lib STATIC +# do not link tests with libaktualizr +list(REMOVE_ITEM TEST_LIBS aktualizr_lib) + +add_library(aktualizr_secondary_lib SHARED ${AKTUALIZR_SECONDARY_LIB_SRC} + $ + $ $ + $ $ $ + $ $ - $ $ - $ $ - $) - -target_link_libraries(aktualizr_secondary_static_lib aktualizr-posix) - -target_include_directories(aktualizr_secondary_static_lib PUBLIC - $ - $ - ${PROJECT_SOURCE_DIR}/src/libaktualizr-posix - ) + $) +set_target_properties(aktualizr_secondary_lib PROPERTIES LIBRARY_OUTPUT_NAME aktualizr_secondary) +target_link_libraries(aktualizr_secondary_lib aktualizr-posix ${AKTUALIZR_EXTERNAL_LIBS}) add_executable(aktualizr-secondary ${AKTUALIZR_SECONDARY_SRC}) -target_link_libraries(aktualizr-secondary - aktualizr_secondary_static_lib - ${AKTUALIZR_EXTERNAL_LIBS} - ) +target_link_libraries(aktualizr-secondary aktualizr_secondary_lib) +install(TARGETS aktualizr_secondary_lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT aktualizr) install(TARGETS aktualizr-secondary COMPONENT aktualizr - RUNTIME DESTINATION bin) + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) set(ALL_AKTUALIZR_SECONDARY_HEADERS aktualizr_secondary.h - 
aktualizr_secondary_interface.h aktualizr_secondary_config.h - aktualizr_secondary_common.h - socket_server.h + aktualizr_secondary_file.h + msg_handler.h + secondary_tcp_server.h + update_agent.h + update_agent_file.h ) -include(AddAktualizrTest) - # insert in front, so that the order matches the dependencies to the system libraries -list(INSERT TEST_LIBS 0 aktualizr_secondary_static_lib) +list(INSERT TEST_LIBS 0 aktualizr_secondary_lib) + +add_aktualizr_test(NAME aktualizr_secondary + SOURCES aktualizr_secondary_test.cc $ + LIBRARIES aktualizr_secondary_lib uptane_generator_lib) add_aktualizr_test(NAME aktualizr_secondary_config - SOURCES aktualizr_secondary_config_test.cc PROJECT_WORKING_DIRECTORY) + SOURCES aktualizr_secondary_config_test.cc PROJECT_WORKING_DIRECTORY + LIBRARIES aktualizr_secondary_lib) -add_aktualizr_test(NAME aktualizr_secondary_update - SOURCES update_test.cc - ARGS ${PROJECT_BINARY_DIR}/ostree_repo PROJECT_WORKING_DIRECTORY) +add_aktualizr_test(NAME secondary_rpc + SOURCES secondary_rpc_test.cc $ $ $ $ $ + PROJECT_WORKING_DIRECTORY) + +list(REMOVE_ITEM TEST_SOURCES $ $ $ $ $) if(BUILD_OSTREE) - add_aktualizr_test(NAME aktualizr_secondary_uptane - SOURCES uptane_test.cc - LIBRARIES uptane_generator_lib - LIBRARIES aktualizr-posix - ARGS ${PROJECT_BINARY_DIR}/ostree_repo PROJECT_WORKING_DIRECTORY) - target_link_libraries(t_aktualizr_secondary_uptane virtual_secondary) + target_sources(aktualizr_secondary_lib PRIVATE update_agent_ostree.cc aktualizr_secondary_ostree.cc) + list(APPEND AKTUALIZR_SECONDARY_LIB_SRC update_agent_ostree.cc aktualizr_secondary_ostree.cc) + list(APPEND ALL_AKTUALIZR_SECONDARY_HEADERS update_agent_ostree.h aktualizr_secondary_ostree.h) + + add_aktualizr_test(NAME aktualizr_secondary_ostree + SOURCES aktualizr_secondary_ostree_test.cc PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_BINARY_DIR}/ostree_repo + LIBRARIES aktualizr_secondary_lib uptane_generator_lib $) + + set_target_properties(t_aktualizr_secondary_ostree 
PROPERTIES LINK_FLAGS -Wl,--export-dynamic) else(BUILD_OSTREE) - list(APPEND TEST_SOURCES uptane_test.cc) + list(APPEND TEST_SOURCES aktualizr_secondary_ostree_test.cc update_agent_ostree.cc aktualizr_secondary_ostree.cc) + list(APPEND ALL_AKTUALIZR_SECONDARY_HEADERS update_agent_ostree.h aktualizr_secondary_ostree.h) endif(BUILD_OSTREE) -# test running the executable with command line option --help -add_test(NAME aktualizr_secondary_cmdline--help COMMAND aktualizr-secondary --help) +# Check the --help option works. +add_test(NAME aktualizr-secondary-option-help + COMMAND aktualizr-secondary --help) + +# Report version. +add_test(NAME aktualizr-secondary-option-version + COMMAND aktualizr-secondary --version) +set_tests_properties(aktualizr-secondary-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current aktualizr-secondary version is: ${AKTUALIZR_VERSION}") + # test running the executable with command line option --something add_test(NAME aktualizr_secondary_cmdline--something COMMAND aktualizr-secondary --something -c ${PROJECT_SOURCE_DIR}/tests/config/minimal.toml) @@ -111,7 +129,7 @@ add_test(NAME aktualizr_secondary_log_default COMMAND aktualizr-secondary -c ${PROJECT_SOURCE_DIR}/tests/config/minimal.toml) set_tests_properties(aktualizr_secondary_log_default PROPERTIES FAIL_REGULAR_EXPRESSION "Final configuration that will be used" - PASS_REGULAR_EXPRESSION "Aktualizr-secondary version") + PASS_REGULAR_EXPRESSION "aktualizr-secondary version") # Check aktualizr-secondary invalid logging levels. 
add_test(NAME aktualizr_secondary_log_invalid @@ -123,7 +141,8 @@ add_test(NAME aktualizr_secondary_log_negative set_tests_properties(aktualizr_secondary_log_negative PROPERTIES PASS_REGULAR_EXPRESSION "Invalid log level") -# these tests pose problem on ptest and are not really worth running there +# These tests cause problems with ptest and are not really worth running there +# anyway: set_tests_properties(aktualizr_secondary_help_with_other_options aktualizr_secondary_help_with_nonexistent_options aktualizr_secondary_log_debug diff --git a/src/aktualizr_secondary/aktualizr_secondary.cc b/src/aktualizr_secondary/aktualizr_secondary.cc index cdf78a2278..21982c66e4 100644 --- a/src/aktualizr_secondary/aktualizr_secondary.cc +++ b/src/aktualizr_secondary/aktualizr_secondary.cc @@ -3,201 +3,491 @@ #include #include +#include +#include + +#include "crypto/keymanager.h" +#include "libaktualizr/types.h" #include "logging/logging.h" -#ifdef BUILD_OSTREE -#include "package_manager/ostreemanager.h" // TODO: Hide behind PackageManagerInterface -#endif -#include "socket_activation/socket_activation.h" -#include "socket_server.h" +#include "storage/invstorage.h" +#include "update_agent.h" +#include "uptane/manifest.h" #include "utilities/utils.h" -class SecondaryAdapter : public Uptane::SecondaryInterface { - public: - SecondaryAdapter(AktualizrSecondary& sec) : secondary(sec) {} - ~SecondaryAdapter() override = default; - - Uptane::EcuSerial getSerial() override { return secondary.getSerialResp(); } - Uptane::HardwareIdentifier getHwId() override { return secondary.getHwIdResp(); } - PublicKey getPublicKey() override { return secondary.getPublicKeyResp(); } - Json::Value getManifest() override { return secondary.getManifestResp(); } - bool putMetadata(const Uptane::RawMetaPack& meta_pack) override { return secondary.putMetadataResp(meta_pack); } - int32_t getRootVersion(bool director) override { return secondary.getRootVersionResp(director); } - bool putRoot(const 
std::string& root, bool director) override { return secondary.putRootResp(root, director); } - bool sendFirmware(const std::shared_ptr& data) override { - return secondary.AktualizrSecondary::sendFirmwareResp(data); - } - - private: - AktualizrSecondary& secondary; -}; - -AktualizrSecondary::AktualizrSecondary(const AktualizrSecondaryConfig& config, - const std::shared_ptr& storage) - : AktualizrSecondaryCommon(config, storage), - socket_server_(std_::make_unique(*this), SocketFromSystemdOrPort(config.network.port)) { - // note: we don't use TlsConfig here and supply the default to - // KeyManagerConf. Maybe we should figure a cleaner way to do that - // (split KeyManager?) - if (!uptaneInitialize()) { - LOG_ERROR << "Failed to initialize"; - return; - } +AktualizrSecondary::AktualizrSecondary(AktualizrSecondaryConfig config, std::shared_ptr storage) + : config_(std::move(config)), + storage_(std::move(storage)), + keys_(std::make_shared(storage_, config_.keymanagerConfig())) { + uptaneInitialize(); + manifest_issuer_ = std::make_shared(keys_, ecu_serial_); + registerHandlers(); } -void AktualizrSecondary::run() { - connectToPrimary(); - socket_server_.Run(); -} +PublicKey AktualizrSecondary::publicKey() const { return keys_->UptanePublicKey(); } + +Uptane::Manifest AktualizrSecondary::getManifest() const { + Uptane::InstalledImageInfo installed_image_info; + Uptane::Manifest manifest; + + if (getInstalledImageInfo(installed_image_info)) { + manifest = manifest_issuer_->assembleAndSignManifest(installed_image_info); + } -void AktualizrSecondary::stop() { /* TODO? 
*/ + return manifest; } -Uptane::EcuSerial AktualizrSecondary::getSerialResp() const { return ecu_serial_; } +data::InstallationResult AktualizrSecondary::putMetadata(const Uptane::SecondaryMetadata& metadata) { + return verifyMetadata(metadata); +} -Uptane::HardwareIdentifier AktualizrSecondary::getHwIdResp() const { return hardware_id_; } +data::InstallationResult AktualizrSecondary::install() { + if (!pending_target_.IsValid()) { + LOG_ERROR << "Aborting target image installation; no valid target found."; + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Aborting target image installation; no valid target found."); + } -PublicKey AktualizrSecondary::getPublicKeyResp() const { return keys_.UptanePublicKey(); } + auto target_name = pending_target_.filename(); + auto result = installPendingTarget(pending_target_); -Json::Value AktualizrSecondary::getManifestResp() const { - Json::Value manifest = pacman->getManifest(getSerialResp()); + switch (result.result_code.num_code) { + case data::ResultCode::Numeric::kOk: { + storage_->saveInstalledVersion(ecu_serial_.ToString(), pending_target_, InstalledVersionUpdateMode::kCurrent); + pending_target_ = Uptane::Target::Unknown(); + LOG_INFO << "The target has been successfully installed: " << target_name; + break; + } + case data::ResultCode::Numeric::kNeedCompletion: { + storage_->saveInstalledVersion(ecu_serial_.ToString(), pending_target_, InstalledVersionUpdateMode::kPending); + LOG_INFO << "The target has been successfully installed, but a reboot is required to be applied: " << target_name; + break; + } + default: { + LOG_INFO << "Failed to install the target: " << target_name; + } + } - return keys_.signTuf(manifest); + return result; } -bool AktualizrSecondary::putMetadataResp(const Uptane::RawMetaPack& meta_pack) { +data::InstallationResult AktualizrSecondary::verifyMetadata(const Uptane::SecondaryMetadata& metadata) { + // 5.4.4.2. 
Full verification https://uptane.github.io/uptane-standard/uptane-standard.html#metadata_verification + + // 1. Load and verify the current time or the most recent securely attested time. + // We trust the time that the given system/ECU provides. TimeStamp now(TimeStamp::Now()); - detected_attack_.clear(); - - // TODO: proper partial verification - root_ = Uptane::Root(Uptane::RepositoryType::Director(), Utils::parseJSON(meta_pack.director_root), root_); - Uptane::Targets targets(Uptane::RepositoryType::Director(), Uptane::Role::Targets(), - Utils::parseJSON(meta_pack.director_targets), std::make_shared(root_)); - if (meta_targets_.version() > targets.version()) { - detected_attack_ = "Rollback attack detected"; - return true; - } - meta_targets_ = targets; - std::vector::const_iterator it; - bool target_found = false; - for (it = meta_targets_.targets.begin(); it != meta_targets_.targets.end(); ++it) { - if (it->IsForSecondary(getSerialResp())) { - if (target_found) { - detected_attack_ = "Duplicate entry for this ECU"; - break; - } - target_found = true; - target_ = std_::make_unique(*it); + + if (config_.uptane.verification_type == VerificationType::kFull) { + // 2. Download and check the Root metadata file from the Director repository. + // 3. NOT SUPPORTED: Download and check the Timestamp metadata file from the Director repository. + // 4. NOT SUPPORTED: Download and check the Snapshot metadata file from the Director repository. + // 5. Download and check the Targets metadata file from the Director repository. 
+ try { + director_repo_.updateMeta(*storage_, metadata); + } catch (const std::exception& e) { + LOG_ERROR << "Failed to update Director metadata: " << e.what(); + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + std::string("Failed to update Director metadata: ") + e.what()); } } - storage_->storeRoot(meta_pack.director_root, Uptane::RepositoryType::Director(), Uptane::Version(root_.version())); - storage_->storeNonRoot(meta_pack.director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); - return true; + // 6. Download and check the Root metadata file from the Image repository. + // 7. Download and check the Timestamp metadata file from the Image repository. + // 8. Download and check the Snapshot metadata file from the Image repository. + // 9. Download and check the top-level Targets metadata file from the Image repository. + try { + image_repo_.updateMeta(*storage_, metadata); + } catch (const std::exception& e) { + LOG_ERROR << "Failed to update Image repo metadata: " << e.what(); + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + std::string("Failed to update Image repo metadata: ") + e.what()); + } + + data::InstallationResult result = findTargets(); + if (result.isSuccess()) { + LOG_INFO << "Metadata verified, new update found."; + } + return result; } -int32_t AktualizrSecondary::getRootVersionResp(bool director) const { - std::string root_meta; - if (!storage_->loadLatestRoot(&root_meta, - (director) ? 
Uptane::RepositoryType::Director() : Uptane::RepositoryType::Image())) { - LOG_ERROR << "Could not load root metadata"; - return -1; +void AktualizrSecondary::initPendingTargetIfAny() { + try { + if (config_.uptane.verification_type == VerificationType::kFull) { + director_repo_.checkMetaOffline(*storage_); + } + image_repo_.checkMetaOffline(*storage_); + } catch (const std::exception& e) { + LOG_INFO << "No valid metadata found in storage."; + return; } - return Uptane::extractVersionUntrusted(root_meta); + findTargets(); } -bool AktualizrSecondary::putRootResp(const std::string& root, bool director) { - (void)root; - (void)director; - LOG_ERROR << "putRootResp is not implemented yet"; - return false; +data::InstallationResult AktualizrSecondary::findTargets() { + std::vector targetsForThisEcu; + if (config_.uptane.verification_type == VerificationType::kFull) { + // 10. Verify that Targets metadata from the Director and Image repositories match. + if (!director_repo_.matchTargetsWithImageTargets(image_repo_.getTargets())) { + LOG_ERROR << "Targets metadata from the Director and Image repositories do not match"; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + "Targets metadata from the Director and Image repositories do not match"); + } + + targetsForThisEcu = director_repo_.getTargets(serial(), hwID()); + } else { + const auto& targets = image_repo_.getTargets()->targets; + for (auto it = targets.begin(); it != targets.end(); ++it) { + auto hwids = it->hardwareIds(); + auto found_loc = std::find(hwids.cbegin(), hwids.cend(), hwID()); + if (found_loc != hwids.end()) { + if (!targetsForThisEcu.empty()) { + auto previous = boost::make_optional(false, 0); + auto current = boost::make_optional(false, 0); + try { + previous = boost::lexical_cast(targetsForThisEcu[0].custom_version()); + } catch (const boost::bad_lexical_cast&) { + LOG_TRACE << "Unable to parse Target custom version: " << targetsForThisEcu[0].custom_version(); + } + 
try { + current = boost::lexical_cast(it->custom_version()); + } catch (const boost::bad_lexical_cast&) { + LOG_TRACE << "Unable to parse Target custom version: " << it->custom_version(); + } + if (!previous && !current) { // NOLINT(bugprone-branch-clone) + // No versions: add this to the vector. + } else if (!previous) { // NOLINT(bugprone-branch-clone) + // Previous Target didn't have a version but this does; replace existing Targets with this. + targetsForThisEcu.clear(); + } else if (!current) { // NOLINT(bugprone-branch-clone) + // Current Target doesn't have a version but previous does; ignore this. + continue; + } else if (previous < current) { + // Current Target is newer; replace existing Targets with this. + targetsForThisEcu.clear(); + } else if (previous > current) { + // Current Target is older; ignore it. + continue; + } else { + // Same version: add it to the vector. + } + } else { + // First matching Target found; add it to the vector. + } + + targetsForThisEcu.push_back(*it); + } + } + } + + if (targetsForThisEcu.size() != 1) { + LOG_ERROR << "Invalid number of targets (should be 1): " << targetsForThisEcu.size(); + return data::InstallationResult( + data::ResultCode::Numeric::kVerificationFailed, + "Invalid number of targets (should be 1): " + std::to_string(targetsForThisEcu.size())); + } + + if (!isTargetSupported(targetsForThisEcu[0])) { + LOG_ERROR << "The given target type is not supported: " << targetsForThisEcu[0].type(); + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + "The given target type is not supported: " + targetsForThisEcu[0].type()); + } + + pending_target_ = targetsForThisEcu[0]; + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); } -bool AktualizrSecondary::sendFirmwareResp(const std::shared_ptr& firmware) { - if (target_ == nullptr) { - LOG_ERROR << "No valid installation target found"; - return false; +void AktualizrSecondary::uptaneInitialize() { + if 
(keys_->generateUptaneKeyPair().empty()) { + throw std::runtime_error("Failed to generate Uptane key pair"); } - std::string treehub_server; + // from uptane/initialize.cc but we only take care of our own serial/hwid + EcuSerials ecu_serials; - if (target_->IsOstree()) { - // this is the ostree specific case - try { - std::string ca, cert, pkey, server_url; - extractCredentialsArchive(*firmware, &ca, &cert, &pkey, &treehub_server); - keys_.loadKeys(&ca, &cert, &pkey); - boost::trim(server_url); - treehub_server = server_url; - } catch (std::runtime_error& exc) { - LOG_ERROR << exc.what(); - - return false; + if (storage_->loadEcuSerials(&ecu_serials)) { + ecu_serial_ = ecu_serials[0].first; + hardware_id_ = ecu_serials[0].second; + return; + } + + std::string ecu_serial_local = config_.uptane.ecu_serial; + if (ecu_serial_local.empty()) { + ecu_serial_local = keys_->UptanePublicKey().KeyId(); + } + + std::string ecu_hardware_id = config_.uptane.ecu_hardware_id; + if (ecu_hardware_id.empty()) { + ecu_hardware_id = Utils::getHostname(); + if (ecu_hardware_id.empty()) { + throw std::runtime_error("Failed to define ECU hardware ID"); } } - data::InstallationResult install_res; + ecu_serials.emplace_back(Uptane::EcuSerial(ecu_serial_local), Uptane::HardwareIdentifier(ecu_hardware_id)); + storage_->storeEcuSerials(ecu_serials); + ecu_serial_ = ecu_serials[0].first; + hardware_id_ = ecu_serials[0].second; - if (target_->IsOstree()) { -#ifdef BUILD_OSTREE - install_res = OstreeManager::pull(config_.pacman.sysroot, treehub_server, keys_, *target_); + // this is a way to find out and store a value of the target name that is installed + // at the initial/provisioning stage and included into a device manifest + // i.e. 
'filepath' field or ["signed"]["installed_image"]["filepath"] + // this value must match the value pushed to the backend during the bitbaking process, + // specifically, at its OSTree push phase and is equal to + // GARAGE_TARGET_NAME ?= "${OSTREE_BRANCHNAME}" which in turn is equal to OSTREE_BRANCHNAME ?= "${SOTA_HARDWARE_ID}" + // therefore, by default GARAGE_TARGET_NAME == OSTREE_BRANCHNAME == SOTA_HARDWARE_ID + // If there is no match then the backend/UI will not render/highlight currently installed version at all/correctly + storage_->importInstalledVersions(config_.import.base_path); +} - if (install_res.result_code.num_code != data::ResultCode::Numeric::kOk) { - LOG_ERROR << "Could not pull from OSTree (" << install_res.result_code.toString() - << "): " << install_res.description; - return false; - } -#else - LOG_ERROR << "Could not pull from OSTree. Aktualizr was built without OSTree support!"; - return false; -#endif - } else if (pacman->name() == "debian") { - // TODO save debian package here. 
- LOG_ERROR << "Installation of non ostree images is not suppotrted yet."; - return false; +void AktualizrSecondary::registerHandlers() { + registerHandler(AKIpUptaneMes_PR_getInfoReq, + std::bind(&AktualizrSecondary::getInfoHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_versionReq, + std::bind(&AktualizrSecondary::versionHdlr, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_manifestReq, + std::bind(&AktualizrSecondary::getManifestHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_rootVerReq, + std::bind(&AktualizrSecondary::getRootVerHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_putRootReq, + std::bind(&AktualizrSecondary::putRootHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_putMetaReq2, + std::bind(&AktualizrSecondary::putMetaHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + registerHandler(AKIpUptaneMes_PR_installReq, + std::bind(&AktualizrSecondary::installHdlr, this, std::placeholders::_1, std::placeholders::_2)); +} + +MsgHandler::ReturnCode AktualizrSecondary::getInfoHdlr(Asn1Message& in_msg, Asn1Message& out_msg) const { + (void)in_msg; + LOG_INFO << "Received an information request message; sending requested information."; + + out_msg.present(AKIpUptaneMes_PR_getInfoResp); + auto info_resp = out_msg.getInfoResp(); + + SetString(&info_resp->ecuSerial, serial().ToString()); + SetString(&info_resp->hwId, hwID().ToString()); + info_resp->keyType = static_cast(publicKey().Type()); + SetString(&info_resp->key, publicKey().Value()); + + return ReturnCode::kOk; +} + +MsgHandler::ReturnCode AktualizrSecondary::versionHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + const uint32_t version = 2; + auto version_req = in_msg.versionReq(); + const auto primary_version = static_cast(version_req->version); + if 
(primary_version < version) { + LOG_ERROR << "Primary protocol version is " << primary_version << " but Secondary version is " << version + << "! Communication will most likely fail!"; + } else if (primary_version > version) { + LOG_INFO << "Primary protocol version is " << primary_version << " but Secondary version is " << version + << ". Please consider upgrading the Secondary."; } - install_res = pacman->install(*target_); - if (install_res.result_code.num_code != data::ResultCode::Numeric::kOk) { - LOG_ERROR << "Could not install target (" << install_res.result_code.toString() << "): " << install_res.description; - return false; + auto m = out_msg.present(AKIpUptaneMes_PR_versionResp).versionResp(); + m->version = version; + + return ReturnCode::kOk; +} + +AktualizrSecondary::ReturnCode AktualizrSecondary::getManifestHdlr(Asn1Message& in_msg, Asn1Message& out_msg) const { + (void)in_msg; + if (last_msg_ != AKIpUptaneMes_PR_manifestReq) { + LOG_INFO << "Received a manifest request message; sending requested manifest."; + } else { + LOG_DEBUG << "Received another manifest request message; sending the same manifest."; } - storage_->saveInstalledVersion(getSerialResp().ToString(), *target_, InstalledVersionUpdateMode::kCurrent); - return true; + + out_msg.present(AKIpUptaneMes_PR_manifestResp); + auto manifest_resp = out_msg.manifestResp(); + manifest_resp->manifest.present = manifest_PR_json; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&manifest_resp->manifest.choice.json, Utils::jsonToStr(getManifest())); + + LOG_TRACE << "Manifest: \n" << getManifest(); + return ReturnCode::kOk; } -void AktualizrSecondary::extractCredentialsArchive(const std::string& archive, std::string* ca, std::string* cert, - std::string* pkey, std::string* treehub_server) { - { - std::stringstream as(archive); - *ca = Utils::readFileFromArchive(as, "ca.pem"); +AktualizrSecondary::ReturnCode AktualizrSecondary::getRootVerHdlr(Asn1Message& in_msg, Asn1Message& 
out_msg) const { + LOG_INFO << "Received a Root version request message."; + auto rv = in_msg.rootVerReq(); + Uptane::RepositoryType repo_type{}; + if (rv->repotype == AKRepoType_director) { + repo_type = Uptane::RepositoryType::Director(); + } else if (rv->repotype == AKRepoType_image) { + repo_type = Uptane::RepositoryType::Image(); + } else { + LOG_WARNING << "Received Root version request with invalid repo type: " << rv->repotype; + repo_type = Uptane::RepositoryType(-1); } - { - std::stringstream as(archive); - *cert = Utils::readFileFromArchive(as, "client.pem"); + + int32_t root_version = -1; + if (repo_type == Uptane::RepositoryType::Director()) { + root_version = director_repo_.rootVersion(); + } else if (repo_type == Uptane::RepositoryType::Image()) { + root_version = image_repo_.rootVersion(); } - { - std::stringstream as(archive); - *pkey = Utils::readFileFromArchive(as, "pkey.pem"); + LOG_DEBUG << "Current " << repo_type << " repo Root metadata version: " << root_version; + + auto m = out_msg.present(AKIpUptaneMes_PR_rootVerResp).rootVerResp(); + m->version = root_version; + + return ReturnCode::kOk; +} + +AktualizrSecondary::ReturnCode AktualizrSecondary::putRootHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + LOG_INFO << "Received a put Root request message; verifying contents..."; + auto pr = in_msg.putRootReq(); + Uptane::RepositoryType repo_type{}; + if (pr->repotype == AKRepoType_director) { + repo_type = Uptane::RepositoryType::Director(); + } else if (pr->repotype == AKRepoType_image) { + repo_type = Uptane::RepositoryType::Image(); + } else { + repo_type = Uptane::RepositoryType(-1); } - { - std::stringstream as(archive); - *treehub_server = Utils::readFileFromArchive(as, "server.url", true); + + const std::string json = ToString(pr->json); + LOG_DEBUG << "Received " << repo_type << " repo Root metadata:\n" << json; + data::InstallationResult result(data::ResultCode::Numeric::kOk, ""); + + if (repo_type == 
Uptane::RepositoryType::Director()) { + if (config_.uptane.verification_type == VerificationType::kTuf) { + LOG_WARNING << "Ignoring new Director Root metadata as it is unnecessary for TUF verification."; + result = + data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Ignoring new Director Root metadata as it is unnecessary for TUF verification."); + } else { + try { + director_repo_.verifyRoot(json); + storage_->storeRoot(json, repo_type, Uptane::Version(director_repo_.rootVersion())); + storage_->clearNonRootMeta(repo_type); + } catch (const std::exception& e) { + LOG_ERROR << "Failed to update Director Root metadata: " << e.what(); + result = data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + std::string("Failed to update Director Root metadata: ") + e.what()); + } + } + } else if (repo_type == Uptane::RepositoryType::Image()) { + try { + image_repo_.verifyRoot(json); + storage_->storeRoot(json, repo_type, Uptane::Version(image_repo_.rootVersion())); + storage_->clearNonRootMeta(repo_type); + } catch (const std::exception& e) { + LOG_ERROR << "Failed to update Image repo Root metadata: " << e.what(); + result = data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + std::string("Failed to update Image repo Root metadata: ") + e.what()); + } + } else { + LOG_WARNING << "Received Root version request with invalid repo type: " << pr->repotype; + result = data::InstallationResult( + data::ResultCode::Numeric::kInternalError, + "Received Root version request with invalid repo type: " + std::to_string(pr->repotype)); } -} -void AktualizrSecondary::connectToPrimary() { - Socket socket(config_.network.primary_ip, config_.network.primary_port); + auto m = out_msg.present(AKIpUptaneMes_PR_putRootResp).putRootResp(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; +} - if (socket.bind(config_.network.port) != 0) { - LOG_ERROR 
<< "Failed to bind a connection socket to the secondary's port"; +void AktualizrSecondary::copyMetadata(Uptane::MetaBundle& meta_bundle, const Uptane::RepositoryType repo, + const Uptane::Role& role, std::string& json) { + auto key = std::make_pair(repo, role); + if (meta_bundle.count(key) > 0) { + LOG_WARNING << repo << " metadata in contains multiple " << role << " objects."; return; } + meta_bundle.emplace(key, std::move(json)); +} + +AktualizrSecondary::ReturnCode AktualizrSecondary::putMetaHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + LOG_INFO << "Received a put metadata request message; verifying contents..."; + auto md = in_msg.putMetaReq2(); + Uptane::MetaBundle meta_bundle; + + if (config_.uptane.verification_type == VerificationType::kFull && + md->directorRepo.present == directorRepo_PR_collection) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + const int director_meta_count = md->directorRepo.choice.collection.list.count; + for (int i = 0; i < director_meta_count; i++) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic, cppcoreguidelines-pro-type-union-access) + const AKMetaJson_t object = *md->directorRepo.choice.collection.list.array[i]; + const std::string role = ToString(object.role); + std::string json = ToString(object.json); + LOG_DEBUG << "Received Director repo " << role << " metadata:\n" << json; + if (role == Uptane::Role::ROOT) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Root(), json); + } else if (role == Uptane::Role::TARGETS) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Targets(), json); + } else { + LOG_WARNING << "Director metadata in unknown format:" << md->directorRepo.present; + } + } + } + + if (md->imageRepo.present == imageRepo_PR_collection) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + const int image_meta_count = md->imageRepo.choice.collection.list.count; + for (int i = 0; i < image_meta_count; 
i++) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic, cppcoreguidelines-pro-type-union-access) + const AKMetaJson_t object = *md->imageRepo.choice.collection.list.array[i]; + const std::string role = ToString(object.role); + std::string json = ToString(object.json); + LOG_DEBUG << "Received Image repo " << role << " metadata:\n" << json; + if (role == Uptane::Role::ROOT) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Root(), json); + } else if (role == Uptane::Role::TIMESTAMP) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp(), json); + } else if (role == Uptane::Role::SNAPSHOT) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot(), json); + } else if (role == Uptane::Role::TARGETS) { + copyMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Targets(), json); + } else { + LOG_WARNING << "Image metadata in unknown format:" << md->imageRepo.present; + } + } + } - if (socket.connect() == 0) { - LOG_INFO << "Connected to Primary, sending info about this secondary..."; - socket_server_.HandleOneConnection(socket.getFD()); + size_t expected_items; + if (config_.uptane.verification_type == VerificationType::kTuf) { + expected_items = 4; } else { - LOG_INFO << "Failed to connect to Primary"; + expected_items = 6; + } + if (meta_bundle.size() != expected_items) { + LOG_WARNING << "Metadata received from Primary is incomplete. 
Expected size: " << expected_items + << " Received: " << meta_bundle.size(); + } + + data::InstallationResult result = putMetadata(meta_bundle); + + auto m = out_msg.present(AKIpUptaneMes_PR_putMetaResp2).putMetaResp2(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; +} + +AktualizrSecondary::ReturnCode AktualizrSecondary::installHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + LOG_INFO << "Received an installation request message; attempting installation..."; + auto result = install(); + + auto m = out_msg.present(AKIpUptaneMes_PR_installResp2).installResp2(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + if (data::ResultCode::Numeric::kNeedCompletion == result.result_code.num_code) { + return ReturnCode::kRebootRequired; } + + return ReturnCode::kOk; } diff --git a/src/aktualizr_secondary/aktualizr_secondary.h b/src/aktualizr_secondary/aktualizr_secondary.h index 8ff68889cc..3fc4c80e0b 100644 --- a/src/aktualizr_secondary/aktualizr_secondary.h +++ b/src/aktualizr_secondary/aktualizr_secondary.h @@ -1,42 +1,81 @@ #ifndef AKTUALIZR_SECONDARY_H #define AKTUALIZR_SECONDARY_H -#include - -#include "aktualizr_secondary_common.h" #include "aktualizr_secondary_config.h" -#include "aktualizr_secondary_interface.h" -#include "crypto/keymanager.h" -#include "socket_server.h" -#include "storage/invstorage.h" -#include "uptane/tuf.h" -#include "utilities/types.h" -#include "utilities/utils.h" - -class AktualizrSecondary : public AktualizrSecondaryInterface, private AktualizrSecondaryCommon { +#include "msg_handler.h" +#include "uptane/directorrepository.h" +#include "uptane/imagerepository.h" +#include "uptane/manifest.h" +#include "uptane/secondary_metadata.h" + +class UpdateAgent; +class INvStorage; +class KeyManager; + +class AktualizrSecondary : public MsgDispatcher { public: - AktualizrSecondary(const 
AktualizrSecondaryConfig& config, const std::shared_ptr& storage); - void run() override; - void stop() override; - - // implementation of primary's SecondaryInterface - Uptane::EcuSerial getSerialResp() const; - Uptane::HardwareIdentifier getHwIdResp() const; - PublicKey getPublicKeyResp() const; - Json::Value getManifestResp() const; - bool putMetadataResp(const Uptane::RawMetaPack& meta_pack); - int32_t getRootVersionResp(bool director) const; - bool putRootResp(const std::string& root, bool director); - bool sendFirmwareResp(const std::shared_ptr& firmware); - - static void extractCredentialsArchive(const std::string& archive, std::string* ca, std::string* cert, - std::string* pkey, std::string* treehub_server); + using Ptr = std::shared_ptr; - private: - void connectToPrimary(); + virtual void initialize() = 0; + const Uptane::EcuSerial& serial() const { return ecu_serial_; } + const Uptane::HardwareIdentifier& hwID() const { return hardware_id_; } + PublicKey publicKey() const; + Uptane::Manifest getManifest() const; + const Uptane::Target& getPendingTarget() const { return pending_target_; } + + virtual data::InstallationResult putMetadata(const Uptane::SecondaryMetadata& metadata); + virtual data::InstallationResult putMetadata(const Uptane::MetaBundle& meta_bundle) { + return putMetadata(Uptane::SecondaryMetadata(meta_bundle)); + } + + virtual data::InstallationResult install(); + virtual void completeInstall() = 0; + + protected: + AktualizrSecondary(AktualizrSecondaryConfig config, std::shared_ptr storage); + + // protected interface to be defined by child classes, i.e. a specific IP secondary type (e.g. 
OSTree, File, etc) + virtual bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const = 0; + virtual bool isTargetSupported(const Uptane::Target& target) const = 0; + virtual data::InstallationResult installPendingTarget(const Uptane::Target& target) = 0; + virtual data::InstallationResult applyPendingInstall(const Uptane::Target& target) = 0; + + // protected interface to be used by child classes + std::shared_ptr& storage() { return storage_; } + Uptane::DirectorRepository& directorRepo() { return director_repo_; } + std::shared_ptr& keyMngr() { return keys_; } + + void initPendingTargetIfAny(); private: - SocketServer socket_server_; + static void copyMetadata(Uptane::MetaBundle& meta_bundle, Uptane::RepositoryType repo, const Uptane::Role& role, + std::string& json); + data::InstallationResult verifyMetadata(const Uptane::SecondaryMetadata& metadata); + data::InstallationResult findTargets(); + void uptaneInitialize(); + void registerHandlers(); + + // Message handlers + ReturnCode getInfoHdlr(Asn1Message& in_msg, Asn1Message& out_msg) const; + static ReturnCode versionHdlr(Asn1Message& in_msg, Asn1Message& out_msg); + ReturnCode getManifestHdlr(Asn1Message& in_msg, Asn1Message& out_msg) const; + ReturnCode getRootVerHdlr(Asn1Message& in_msg, Asn1Message& out_msg) const; + ReturnCode putRootHdlr(Asn1Message& in_msg, Asn1Message& out_msg); + ReturnCode putMetaHdlr(Asn1Message& in_msg, Asn1Message& out_msg); + ReturnCode installHdlr(Asn1Message& in_msg, Asn1Message& out_msg); + + Uptane::HardwareIdentifier hardware_id_{Uptane::HardwareIdentifier::Unknown()}; + Uptane::EcuSerial ecu_serial_{Uptane::EcuSerial::Unknown()}; + + const AktualizrSecondaryConfig config_; + std::shared_ptr storage_; + std::shared_ptr keys_; + + Uptane::ManifestIssuer::Ptr manifest_issuer_; + + Uptane::DirectorRepository director_repo_; + Uptane::ImageRepository image_repo_; + Uptane::Target pending_target_{Uptane::Target::Unknown()}; }; #endif // 
AKTUALIZR_SECONDARY_H diff --git a/src/aktualizr_secondary/aktualizr_secondary_common.cc b/src/aktualizr_secondary/aktualizr_secondary_common.cc deleted file mode 100644 index eaa5404618..0000000000 --- a/src/aktualizr_secondary/aktualizr_secondary_common.cc +++ /dev/null @@ -1,61 +0,0 @@ -#include "aktualizr_secondary_common.h" -#include "package_manager/packagemanagerfactory.h" -#include "utilities/utils.h" - -AktualizrSecondaryCommon::AktualizrSecondaryCommon(const AktualizrSecondaryConfig &config, - std::shared_ptr storage) - : config_(config), - storage_(std::move(storage)), - keys_(storage_, config.keymanagerConfig()), - ecu_serial_(Uptane::EcuSerial::Unknown()), - hardware_id_(Uptane::HardwareIdentifier::Unknown()) { - pacman = PackageManagerFactory::makePackageManager(config_.pacman, storage_, nullptr, nullptr); - - // Load Root keys from storage - std::string root; - storage_->loadLatestRoot(&root, Uptane::RepositoryType::Director()); - if (root.size() > 0) { - LOG_DEBUG << "Loading root.json:" << root; - root_ = Uptane::Root(Uptane::RepositoryType::Director(), Utils::parseJSON(root)); - } else { - LOG_INFO << "No root.json in local storage, defaulting will accept the first root.json provided"; - root_ = Uptane::Root(Uptane::Root::Policy::kAcceptAll); - } -} - -bool AktualizrSecondaryCommon::uptaneInitialize() { - if (keys_.generateUptaneKeyPair().size() == 0) { - LOG_ERROR << "Failed to generate uptane key pair"; - return false; - } - - // from uptane/initialize.cc but we only take care of our own serial/hwid - EcuSerials ecu_serials; - - if (storage_->loadEcuSerials(&ecu_serials)) { - ecu_serial_ = ecu_serials[0].first; - hardware_id_ = ecu_serials[0].second; - - return true; - } - - std::string ecu_serial_local = config_.uptane.ecu_serial; - if (ecu_serial_local.empty()) { - ecu_serial_local = keys_.UptanePublicKey().KeyId(); - } - - std::string ecu_hardware_id = config_.uptane.ecu_hardware_id; - if (ecu_hardware_id.empty()) { - ecu_hardware_id = 
Utils::getHostname(); - if (ecu_hardware_id == "") { - return false; - } - } - - ecu_serials.emplace_back(Uptane::EcuSerial(ecu_serial_local), Uptane::HardwareIdentifier(ecu_hardware_id)); - storage_->storeEcuSerials(ecu_serials); - ecu_serial_ = ecu_serials[0].first; - hardware_id_ = ecu_serials[0].second; - - return true; -} diff --git a/src/aktualizr_secondary/aktualizr_secondary_common.h b/src/aktualizr_secondary/aktualizr_secondary_common.h deleted file mode 100644 index a1605cbbf7..0000000000 --- a/src/aktualizr_secondary/aktualizr_secondary_common.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef AKTUALIZR_SECONDARY_COMMON_H_ -#define AKTUALIZR_SECONDARY_COMMON_H_ - -#include -#include -#include - -#include "aktualizr_secondary_config.h" -#include "crypto/keymanager.h" -#include "package_manager/packagemanagerinterface.h" -#include "storage/invstorage.h" - -class AktualizrSecondaryCommon { - public: - AktualizrSecondaryCommon(const AktualizrSecondaryConfig& /*config*/, std::shared_ptr /*storage*/); - - bool uptaneInitialize(); - - AktualizrSecondaryConfig config_; - - std::shared_ptr storage_; - KeyManager keys_; - Uptane::EcuSerial ecu_serial_; - Uptane::HardwareIdentifier hardware_id_; - std::shared_ptr pacman; - Uptane::Root root_; - Uptane::Targets meta_targets_; - std::string detected_attack_; - std::unique_ptr target_; -}; - -#endif // AKTUALIZR_SECONDARY_COMMON_H_ diff --git a/src/aktualizr_secondary/aktualizr_secondary_config.cc b/src/aktualizr_secondary/aktualizr_secondary_config.cc index c3ff1c109b..b246cf870d 100644 --- a/src/aktualizr_secondary/aktualizr_secondary_config.cc +++ b/src/aktualizr_secondary/aktualizr_secondary_config.cc @@ -2,6 +2,17 @@ #include +#include "utilities/config_utils.h" + +template <> +inline void CopyFromConfig(VerificationType& dest, const std::string& option_name, + const boost::property_tree::ptree& pt) { + boost::optional value = pt.get_optional(option_name); + if (value.is_initialized()) { + dest = 
Uptane::VerificationTypeFromString(StripQuotesFromStrings(value.get())); + } +} + void AktualizrSecondaryNetConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { CopyFromConfig(port, "port", pt); CopyFromConfig(primary_ip, "primary_ip", pt); @@ -19,6 +30,8 @@ void AktualizrSecondaryUptaneConfig::updateFromPropertyTree(const boost::propert CopyFromConfig(ecu_hardware_id, "ecu_hardware_id", pt); CopyFromConfig(key_source, "key_source", pt); CopyFromConfig(key_type, "key_type", pt); + CopyFromConfig(force_install_completion, "force_install_completion", pt); + CopyFromConfig(verification_type, "verification_type", pt); } void AktualizrSecondaryUptaneConfig::writeToStream(std::ostream& out_stream) const { @@ -26,6 +39,8 @@ void AktualizrSecondaryUptaneConfig::writeToStream(std::ostream& out_stream) con writeOption(out_stream, ecu_hardware_id, "ecu_hardware_id"); writeOption(out_stream, key_source, "key_source"); writeOption(out_stream, key_type, "key_type"); + writeOption(out_stream, force_install_completion, "force_install_completion"); + writeOption(out_stream, verification_type, "verification_type"); } AktualizrSecondaryConfig::AktualizrSecondaryConfig(const boost::program_options::variables_map& cmd) { @@ -95,6 +110,8 @@ void AktualizrSecondaryConfig::updateFromPropertyTree(const boost::property_tree CopySubtreeFromConfig(p11, "p11", pt); CopySubtreeFromConfig(pacman, "pacman", pt); CopySubtreeFromConfig(storage, "storage", pt); + CopySubtreeFromConfig(import, "import", pt); + CopySubtreeFromConfig(bootloader, "bootloader", pt); } void AktualizrSecondaryConfig::writeToStream(std::ostream& sink) const { @@ -107,6 +124,8 @@ void AktualizrSecondaryConfig::writeToStream(std::ostream& sink) const { WriteSectionToStream(p11, "p11", sink); WriteSectionToStream(pacman, "pacman", sink); WriteSectionToStream(storage, "storage", sink); + WriteSectionToStream(import, "import", sink); + WriteSectionToStream(bootloader, "bootloader", sink); } std::ostream& 
operator<<(std::ostream& os, const AktualizrSecondaryConfig& cfg) { diff --git a/src/aktualizr_secondary/aktualizr_secondary_config.h b/src/aktualizr_secondary/aktualizr_secondary_config.h index 0537649e85..65116a52d7 100644 --- a/src/aktualizr_secondary/aktualizr_secondary_config.h +++ b/src/aktualizr_secondary/aktualizr_secondary_config.h @@ -1,16 +1,15 @@ #ifndef AKTUALIZR_SECONDARY_CONFIG_H_ #define AKTUALIZR_SECONDARY_CONFIG_H_ -#include -#include -#include +#include // for in_port_t +#include // for path +#include // for variables_map +#include // for ptree +#include // for ostream +#include // for string -#include "crypto/keymanager_config.h" -#include "crypto/p11_config.h" -#include "logging/logging_config.h" -#include "package_manager/packagemanagerconfig.h" -#include "storage/storage_config.h" -#include "utilities/config_utils.h" +#include "libaktualizr/config.h" // for BaseConfig, Bootl... +#include "libaktualizr/types.h" // for CryptoSource, Key... // Try to keep the order of config options the same as in // AktualizrSecondaryConfig::writeToStream() and @@ -30,6 +29,8 @@ struct AktualizrSecondaryUptaneConfig { std::string ecu_hardware_id; CryptoSource key_source{CryptoSource::kFile}; KeyType key_type{KeyType::kRSA2048}; + bool force_install_completion{false}; + VerificationType verification_type{VerificationType::kFull}; void updateFromPropertyTree(const boost::property_tree::ptree& pt); void writeToStream(std::ostream& out_stream) const; @@ -38,7 +39,7 @@ struct AktualizrSecondaryUptaneConfig { class AktualizrSecondaryConfig : public BaseConfig { public: AktualizrSecondaryConfig() = default; - AktualizrSecondaryConfig(const boost::program_options::variables_map& cmd); + explicit AktualizrSecondaryConfig(const boost::program_options::variables_map& cmd); explicit AktualizrSecondaryConfig(const boost::filesystem::path& filename); KeyManagerConfig keymanagerConfig() const; @@ -46,16 +47,18 @@ class AktualizrSecondaryConfig : public BaseConfig { void 
postUpdateValues(); void writeToStream(std::ostream& sink) const; - // from primary config + // from Primary config LoggerConfig logger; AktualizrSecondaryNetConfig network; AktualizrSecondaryUptaneConfig uptane; - // from primary config + // from Primary config P11Config p11; PackageConfig pacman; + BootloaderConfig bootloader; StorageConfig storage; + ImportConfig import; private: void updateFromCommandLine(const boost::program_options::variables_map& cmd); diff --git a/src/aktualizr_secondary/aktualizr_secondary_config_test.cc b/src/aktualizr_secondary/aktualizr_secondary_config_test.cc index 2ac654148f..ac7cade422 100644 --- a/src/aktualizr_secondary/aktualizr_secondary_config_test.cc +++ b/src/aktualizr_secondary/aktualizr_secondary_config_test.cc @@ -1,6 +1,9 @@ #include #include "aktualizr_secondary_config.h" + +#include + #include "utilities/utils.h" TEST(aktualizr_secondary_config, config_initialized_values) { @@ -13,12 +16,16 @@ TEST(aktualizr_secondary_config, config_toml_parsing) { AktualizrSecondaryConfig conf("tests/config/aktualizr_secondary.toml"); EXPECT_EQ(conf.network.port, 9031); - - EXPECT_EQ(conf.pacman.type, PackageManager::kOstree); +#ifdef BUILD_OSTREE + EXPECT_EQ(conf.pacman.type, PACKAGE_MANAGER_OSTREE); +#else + EXPECT_EQ(conf.pacman.type, PACKAGE_MANAGER_NONE); +#endif EXPECT_EQ(conf.pacman.os, std::string("testos")); EXPECT_EQ(conf.pacman.sysroot, boost::filesystem::path("testsysroot")); EXPECT_EQ(conf.pacman.ostree_server, std::string("test_server")); EXPECT_EQ(conf.pacman.packages_file, boost::filesystem::path("/test_packages")); + EXPECT_EQ(conf.uptane.verification_type, VerificationType::kFull); } /* We don't normally dump the config to file, but we do write it to the log. 
*/ diff --git a/src/aktualizr_secondary/aktualizr_secondary_file.cc b/src/aktualizr_secondary/aktualizr_secondary_file.cc new file mode 100644 index 0000000000..13e34a5911 --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_file.cc @@ -0,0 +1,85 @@ +#include "aktualizr_secondary_file.h" + +#include "storage/invstorage.h" +#include "update_agent_file.h" + +const std::string AktualizrSecondaryFile::FileUpdateDefaultFile{"firmware.txt"}; + +AktualizrSecondaryFile::AktualizrSecondaryFile(const AktualizrSecondaryConfig& config) + : AktualizrSecondaryFile(config, INvStorage::newStorage(config.storage)) {} + +AktualizrSecondaryFile::AktualizrSecondaryFile(const AktualizrSecondaryConfig& config, + std::shared_ptr storage, + std::shared_ptr update_agent) + : AktualizrSecondary(config, std::move(storage)), update_agent_{std::move(update_agent)} { + registerHandler(AKIpUptaneMes_PR_uploadDataReq, std::bind(&AktualizrSecondaryFile::uploadDataHdlr, this, + std::placeholders::_1, std::placeholders::_2)); + if (!update_agent_) { + std::string current_target_name; + + boost::optional current_version; + boost::optional pending_version; + auto installed_version_res = + AktualizrSecondary::storage()->loadInstalledVersions("", ¤t_version, &pending_version); + + if (installed_version_res && !!current_version) { + current_target_name = current_version->filename(); + } else { + current_target_name = "unknown"; + } + + update_agent_ = std::make_shared(config.storage.path / FileUpdateDefaultFile, current_target_name); + } +} + +void AktualizrSecondaryFile::initialize() { initPendingTargetIfAny(); } + +data::InstallationResult AktualizrSecondaryFile::receiveData(const uint8_t* data, size_t size) { + if (!getPendingTarget().IsValid()) { + LOG_ERROR << "Aborting image download; no valid target found."; + return data::InstallationResult(data::ResultCode::Numeric::kGeneralError, + "Aborting image download; no valid target found."); + } + + return 
update_agent_->receiveData(getPendingTarget(), data, size); +} + +bool AktualizrSecondaryFile::isTargetSupported(const Uptane::Target& target) const { + return update_agent_->isTargetSupported(target); +} + +data::InstallationResult AktualizrSecondaryFile::applyPendingInstall(const Uptane::Target& target) { + return update_agent_->applyPendingInstall(target); +} + +bool AktualizrSecondaryFile::getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const { + return update_agent_->getInstalledImageInfo(installed_image_info); +} + +data::InstallationResult AktualizrSecondaryFile::installPendingTarget(const Uptane::Target& target) { + return update_agent_->install(target); +} + +void AktualizrSecondaryFile::completeInstall() { return update_agent_->completeInstall(); } + +MsgHandler::ReturnCode AktualizrSecondaryFile::uploadDataHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + if (last_msg_ != AKIpUptaneMes_PR_uploadDataReq) { + LOG_INFO << "Received an initial data upload request message; attempting to receive data..."; + } else { + LOG_DEBUG << "Received another data upload request message; attempting to receive data..."; + } + + auto rec_buf_size = in_msg.uploadDataReq()->data.size; + if (rec_buf_size < 0) { + LOG_ERROR << "The received data buffer size is negative: " << rec_buf_size; + return ReturnCode::kOk; + } + + auto result = receiveData(in_msg.uploadDataReq()->data.buf, static_cast(rec_buf_size)); + + auto m = out_msg.present(AKIpUptaneMes_PR_uploadDataResp).uploadDataResp(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; +} diff --git a/src/aktualizr_secondary/aktualizr_secondary_file.h b/src/aktualizr_secondary/aktualizr_secondary_file.h new file mode 100644 index 0000000000..f43cc1d956 --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_file.h @@ -0,0 +1,34 @@ +#ifndef AKTUALIZR_SECONDARY_FILE_H +#define AKTUALIZR_SECONDARY_FILE_H + 
+#include + +#include "aktualizr_secondary.h" + +class FileUpdateAgent; + +class AktualizrSecondaryFile : public AktualizrSecondary { + public: + static const std::string FileUpdateDefaultFile; + + explicit AktualizrSecondaryFile(const AktualizrSecondaryConfig& config); + AktualizrSecondaryFile(const AktualizrSecondaryConfig& config, std::shared_ptr storage, + std::shared_ptr update_agent = nullptr); + + void initialize() override; + data::InstallationResult receiveData(const uint8_t* data, size_t size); + + protected: + bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const override; + bool isTargetSupported(const Uptane::Target& target) const override; + data::InstallationResult installPendingTarget(const Uptane::Target& target) override; + data::InstallationResult applyPendingInstall(const Uptane::Target& target) override; + void completeInstall() override; + + ReturnCode uploadDataHdlr(Asn1Message& in_msg, Asn1Message& out_msg); + + private: + std::shared_ptr update_agent_; +}; + +#endif // AKTUALIZR_SECONDARY_FILE_H diff --git a/src/aktualizr_secondary/aktualizr_secondary_interface.h b/src/aktualizr_secondary/aktualizr_secondary_interface.h deleted file mode 100644 index 68329ac4d0..0000000000 --- a/src/aktualizr_secondary/aktualizr_secondary_interface.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef AKTUALIZR_SECONDARY_INTERFACE_H_ -#define AKTUALIZR_SECONDARY_INTERFACE_H_ - -class AktualizrSecondaryInterface { - public: - virtual ~AktualizrSecondaryInterface() = default; - virtual void run() = 0; - virtual void stop() = 0; -}; - -#endif // AKTUALIZR_SECONDARY_INTERFACE_H_ diff --git a/src/aktualizr_secondary/aktualizr_secondary_ostree.cc b/src/aktualizr_secondary/aktualizr_secondary_ostree.cc new file mode 100644 index 0000000000..cacd5a5e37 --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_ostree.cc @@ -0,0 +1,97 @@ +#include "aktualizr_secondary_ostree.h" +#include "package_manager/ostreemanager.h" +#include 
"update_agent_ostree.h" + +AktualizrSecondaryOstree::AktualizrSecondaryOstree(const AktualizrSecondaryConfig& config) + : AktualizrSecondaryOstree(config, INvStorage::newStorage(config.storage)) {} + +AktualizrSecondaryOstree::AktualizrSecondaryOstree(const AktualizrSecondaryConfig& config, + const std::shared_ptr& storage) + : AktualizrSecondary(config, storage) { + registerHandler(AKIpUptaneMes_PR_downloadOstreeRevReq, std::bind(&AktualizrSecondaryOstree::downloadOstreeRev, this, + std::placeholders::_1, std::placeholders::_2)); + + std::shared_ptr pack_man = + std::make_shared(config.pacman, config.bootloader, AktualizrSecondary::storage(), nullptr); + update_agent_ = + std::make_shared(config.pacman.sysroot, keyMngr(), pack_man, config.uptane.ecu_hardware_id); +} + +void AktualizrSecondaryOstree::initialize() { + initPendingTargetIfAny(); + + if (hasPendingUpdate()) { + LOG_INFO << "Found a pending target to be applied."; + // TODO(OTA-4545): refactor this to make it simpler as we don't need to persist/store + // an installation status of each ECU but store it just for a given secondary ECU + std::vector installed_versions; + boost::optional pending_target; + AktualizrSecondary::storage()->loadInstalledVersions(serial().ToString(), nullptr, &pending_target); + + if (!!pending_target) { + data::InstallationResult install_res = + data::InstallationResult(data::ResultCode::Numeric::kUnknown, "Unknown installation error"); + LOG_INFO << "Pending update found; attempting to apply it. 
Target hash: " << pending_target->sha256Hash(); + + install_res = applyPendingInstall(*pending_target); + + if (install_res.result_code != data::ResultCode::Numeric::kNeedCompletion) { + AktualizrSecondary::storage()->saveEcuInstallationResult(serial(), install_res); + + if (install_res.isSuccess()) { + LOG_INFO << "Pending update has been successfully applied: " << pending_target->sha256Hash(); + AktualizrSecondary::storage()->saveInstalledVersion(serial().ToString(), *pending_target, + InstalledVersionUpdateMode::kCurrent); + } else { + LOG_ERROR << "Application of the pending update has failed (" << install_res.result_code.ToString() + << "): " << install_res.description; + AktualizrSecondary::storage()->saveInstalledVersion(serial().ToString(), *pending_target, + InstalledVersionUpdateMode::kNone); + } + + directorRepo().dropTargets(*AktualizrSecondary::storage()); + } else { + LOG_INFO << "Pending update hasn't been applied because a reboot hasn't been detected"; + } + } + } +} + +MsgHandler::ReturnCode AktualizrSecondaryOstree::downloadOstreeRev(Asn1Message& in_msg, Asn1Message& out_msg) { + LOG_INFO << "Received an OSTree download request; attempting download..."; + auto result = downloadOstreeUpdate(ToString(in_msg.downloadOstreeRevReq()->tlsCred)); + + auto m = out_msg.present(AKIpUptaneMes_PR_downloadOstreeRevResp).downloadOstreeRevResp(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; +} + +data::InstallationResult AktualizrSecondaryOstree::downloadOstreeUpdate(const std::string& packed_tls_creds) { + if (!getPendingTarget().IsValid()) { + LOG_ERROR << "Aborting image download; no valid target found."; + return data::InstallationResult(data::ResultCode::Numeric::kGeneralError, + "Aborting image download; no valid target found."); + } + + return update_agent_->downloadTargetRev(getPendingTarget(), packed_tls_creds); +} + +bool 
AktualizrSecondaryOstree::isTargetSupported(const Uptane::Target& target) const { + return update_agent_->isTargetSupported(target); +} + +data::InstallationResult AktualizrSecondaryOstree::applyPendingInstall(const Uptane::Target& target) { + return update_agent_->applyPendingInstall(target); +} + +bool AktualizrSecondaryOstree::getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const { + return update_agent_->getInstalledImageInfo(installed_image_info); +} + +data::InstallationResult AktualizrSecondaryOstree::installPendingTarget(const Uptane::Target& target) { + return update_agent_->install(target); +} + +void AktualizrSecondaryOstree::completeInstall() { return update_agent_->completeInstall(); } diff --git a/src/aktualizr_secondary/aktualizr_secondary_ostree.h b/src/aktualizr_secondary/aktualizr_secondary_ostree.h new file mode 100644 index 0000000000..b6f7c80fe6 --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_ostree.h @@ -0,0 +1,32 @@ +#ifndef AKTUALIZR_SECONDARY_OSTREE_H +#define AKTUALIZR_SECONDARY_OSTREE_H + +#include "aktualizr_secondary.h" +#include "storage/invstorage.h" + +class OstreeUpdateAgent; + +class AktualizrSecondaryOstree : public AktualizrSecondary { + public: + explicit AktualizrSecondaryOstree(const AktualizrSecondaryConfig& config); + AktualizrSecondaryOstree(const AktualizrSecondaryConfig& config, const std::shared_ptr& storage); + + void initialize() override; + data::InstallationResult downloadOstreeUpdate(const std::string& packed_tls_creds); + + protected: + bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const override; + bool isTargetSupported(const Uptane::Target& target) const override; + data::InstallationResult installPendingTarget(const Uptane::Target& target) override; + data::InstallationResult applyPendingInstall(const Uptane::Target& target) override; + void completeInstall() override; + + private: + bool hasPendingUpdate() { return 
storage()->hasPendingInstall(); } + + ReturnCode downloadOstreeRev(Asn1Message& in_msg, Asn1Message& out_msg); + + std::shared_ptr update_agent_; +}; + +#endif // AKTUALIZR_SECONDARY_OSTREE_H diff --git a/src/aktualizr_secondary/aktualizr_secondary_ostree_test.cc b/src/aktualizr_secondary/aktualizr_secondary_ostree_test.cc new file mode 100644 index 0000000000..2856bc6a9b --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_ostree_test.cc @@ -0,0 +1,375 @@ +#include + +#include + +#include "boost/algorithm/string/trim.hpp" +#include "boost/process.hpp" + +#include "logging/logging.h" +#include "test_utils.h" + +#include "aktualizr_secondary_ostree.h" +#include "update_agent_ostree.h" +#include "uptane_repo.h" + +class Treehub { + public: + Treehub(const std::string& server_path) + : port_(TestUtils::getFreePort()), + url_("http://127.0.0.1:" + port_), + process_(server_path, "-p", port_, "-d", root_dir_.PathString(), "-s0.5", "--create") { + TestUtils::waitForServer(url() + "/"); + auto rev_process = Process("ostree").run({"rev-parse", "--repo", root_dir_.PathString(), "master"}); + EXPECT_EQ(std::get<0>(rev_process), 0) << std::get<2>(rev_process); + cur_rev_ = std::get<1>(rev_process); + boost::trim_right_if(cur_rev_, boost::is_any_of(" \t\r\n")); + + LOG_INFO << "Treehub is running on: " << port_ << " current revision: " << cur_rev_; + } + + ~Treehub() { + process_.terminate(); + process_.wait_for(std::chrono::seconds(10)); + if (process_.running()) { + LOG_ERROR << "Failed to stop Treehub server"; + } else { + LOG_INFO << "Treehub server has been stopped"; + } + } + + public: + const std::string& url() const { return url_; } + const std::string& curRev() const { return cur_rev_; } + + private: + TemporaryDirectory root_dir_; + const std::string port_; + const std::string url_; + boost::process::child process_; + std::string cur_rev_; +}; + +class OstreeRootfs { + public: + OstreeRootfs(const std::string& rootfs_template) { + auto sysroot_copy = 
Process("cp").run({"-r", rootfs_template, getPath().c_str()}); + EXPECT_EQ(std::get<0>(sysroot_copy), 0) + << "stdout: " << std::get<1>(sysroot_copy) << " stderr:" << std::get<2>(sysroot_copy); + resetDeployment(); + } + + const boost::filesystem::path& getPath() const { return sysroot_dir_; } + const char* getDeploymentRev() const { return rev_.c_str(); } + int getDeploymentSerial() const { return 0; } + const char* getOSName() const { return os_name_.c_str(); } + + OstreeDeployment* getDeployment() const { return deployment_.get(); } + void setNewDeploymentRev(const std::string& new_rev) { rev_ = new_rev; } + + void resetDeployment() { + Process::Result deployment_rev; + // When reset after the first time, this annoyingly returns an error code + // despite apparently succeeding. Retry to be extra safe. + for (int i = 0; i < 2; ++i) { + deployment_rev = Process("ostree").run( + {"rev-parse", std::string("--repo"), getPath().string() + "/ostree/repo", "generate-remote/generated"}); + if (std::get<0>(deployment_rev) == 0) { + break; + } + } + + EXPECT_EQ(std::get<0>(deployment_rev), 0) + << "stdout: " << std::get<1>(deployment_rev) << " stderr:" << std::get<2>(deployment_rev); + + rev_ = std::get<1>(deployment_rev); + boost::trim_right_if(rev_, boost::is_any_of(" \t\r\n")); + + deployment_.reset(ostree_deployment_new(0, getOSName(), getDeploymentRev(), getDeploymentSerial(), + getDeploymentRev(), getDeploymentSerial())); + } + + private: + struct OstreeDeploymentDeleter { + void operator()(OstreeDeployment* e) const { g_object_unref(reinterpret_cast(e)); } + }; + + const std::string os_name_{"dummy-os"}; + TemporaryDirectory tmp_dir_; + boost::filesystem::path sysroot_dir_{tmp_dir_ / "ostree-rootfs"}; + std::string rev_; + std::unique_ptr deployment_; +}; + +class AktualizrSecondaryWrapper { + public: + AktualizrSecondaryWrapper(const OstreeRootfs& sysroot, const Treehub& treehub, const VerificationType vtype) { + config_.pacman.type = PACKAGE_MANAGER_OSTREE; + 
config_.pacman.os = sysroot.getOSName(); + config_.pacman.sysroot = sysroot.getPath(); + config_.pacman.ostree_server = treehub.url(); + + config_.bootloader.reboot_sentinel_dir = storage_dir_.Path(); + config_.bootloader.reboot_sentinel_name = "need_reboot"; + + config_.storage.path = storage_dir_.Path(); + config_.storage.type = StorageType::kSqlite; + + config_.uptane.verification_type = vtype; + + storage_ = INvStorage::newStorage(config_.storage); + secondary_ = std::make_shared(config_, storage_); + secondary_->initialize(); + } + + std::shared_ptr& operator->() { return secondary_; } + + Uptane::Target getPendingVersion() const { return getVersion().first; } + + Uptane::Target getCurrentVersion() const { return getVersion().second; } + + std::pair getVersion() const { + boost::optional current_target; + boost::optional pending_target; + + storage_->loadInstalledVersions(secondary_->serial().ToString(), ¤t_target, &pending_target); + + return std::make_pair(!pending_target ? Uptane::Target::Unknown() : *pending_target, + !current_target ? 
Uptane::Target::Unknown() : *current_target); + } + + std::string hardwareID() const { return secondary_->hwID().ToString(); } + + std::string serial() const { return secondary_->serial().ToString(); } + + void reboot() { + boost::filesystem::remove(storage_dir_ / config_.bootloader.reboot_sentinel_name); + secondary_ = std::make_shared(config_, storage_); + secondary_->initialize(); + } + + private: + TemporaryDirectory storage_dir_; + AktualizrSecondaryConfig config_; + std::shared_ptr storage_; + std::shared_ptr secondary_; +}; + +class UptaneRepoWrapper { + public: + UptaneRepoWrapper() { uptane_repo_.generateRepo(KeyType::kED25519); } + + Uptane::SecondaryMetadata addOstreeRev(const std::string& rev, const std::string& hardware_id, + const std::string& serial) { + uptane_repo_.addCustomImage(rev, Hash(Hash::Type::kSha256, rev), 0, hardware_id); + uptane_repo_.addTarget(rev, hardware_id, serial); + uptane_repo_.signTargets(); + + return Uptane::SecondaryMetadata(getCurrentMetadata()); + } + + Uptane::MetaBundle getCurrentMetadata() const { + Uptane::MetaBundle meta_bundle; + std::string metadata; + + boost::filesystem::load_string_file(director_dir_ / "root.json", metadata); + meta_bundle.insert({std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), std::move(metadata)}); + boost::filesystem::load_string_file(director_dir_ / "targets.json", metadata); + meta_bundle.insert( + {std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), std::move(metadata)}); + + boost::filesystem::load_string_file(imagerepo_dir_ / "root.json", metadata); + meta_bundle.insert({std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), std::move(metadata)}); + boost::filesystem::load_string_file(imagerepo_dir_ / "timestamp.json", metadata); + meta_bundle.insert( + {std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), std::move(metadata)}); + boost::filesystem::load_string_file(imagerepo_dir_ / "snapshot.json", 
metadata); + meta_bundle.insert( + {std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), std::move(metadata)}); + boost::filesystem::load_string_file(imagerepo_dir_ / "targets.json", metadata); + meta_bundle.insert({std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), std::move(metadata)}); + + return meta_bundle; + } + + std::shared_ptr getImageData(const std::string& targetname) const { + auto image_data = std::make_shared(); + boost::filesystem::load_string_file(root_dir_ / targetname, *image_data); + return image_data; + } + + private: + TemporaryDirectory root_dir_; + boost::filesystem::path director_dir_{root_dir_ / "repo/director"}; + boost::filesystem::path imagerepo_dir_{root_dir_ / "repo/repo"}; + UptaneRepo uptane_repo_{root_dir_.Path(), "", ""}; +}; + +class SecondaryOstreeTest : public ::testing::TestWithParam { + public: + static const char* curOstreeRootfsRev(OstreeDeployment* ostree_depl) { + (void)ostree_depl; + return sysroot_->getDeploymentRev(); + } + + static OstreeDeployment* curOstreeDeployment(OstreeSysroot* ostree_sysroot) { + (void)ostree_sysroot; + return sysroot_->getDeployment(); + } + + static void setOstreeRootfsTemplate(const std::string& ostree_rootfs_template) { + ostree_rootfs_template_ = ostree_rootfs_template; + } + + protected: + static void SetUpTestSuite() { + treehub_ = std::make_shared("tests/sota_tools/treehub_server.py"); + sysroot_ = std::make_shared(ostree_rootfs_template_); + } + + static void TearDownTestSuite() { + treehub_.reset(); + sysroot_.reset(); + } + + protected: + SecondaryOstreeTest() { + if (needs_reset_) { + sysroot_->resetDeployment(); + needs_reset_ = false; + } + } + + Uptane::MetaBundle addDefaultTarget() { return addTarget(treehub_->curRev()); } + + Uptane::MetaBundle addTarget(const std::string& rev = "", const std::string& hardware_id = "", + const std::string& serial = "") { + auto rev_to_apply = rev.empty() ? 
treehub_->curRev() : rev; + auto hw_id = hardware_id.empty() ? secondary_.hardwareID() : hardware_id; + auto serial_id = serial.empty() ? secondary_.serial() : serial; + + uptane_repo_.addOstreeRev(rev, hw_id, serial_id); + + return currentMetadata(); + } + + Uptane::MetaBundle currentMetadata() const { return uptane_repo_.getCurrentMetadata(); } + + std::string getCredsToSend() const { + std::map creds_map = { + {"ca.pem", ""}, {"client.pem", ""}, {"pkey.pem", ""}, {"server.url", treehub_->url()}}; + + std::stringstream creads_strstream; + Utils::writeArchive(creds_map, creads_strstream); + + return creads_strstream.str(); + } + + Hash treehubCurRevHash() const { return Hash(Hash::Type::kSha256, treehub_->curRev()); } + Hash sysrootCurRevHash() const { return Hash(Hash::Type::kSha256, sysroot_->getDeploymentRev()); } + const std::string& treehubCurRev() const { return treehub_->curRev(); } + + protected: + static std::shared_ptr treehub_; + static std::string ostree_rootfs_template_; + static std::shared_ptr sysroot_; + static bool needs_reset_; + + AktualizrSecondaryWrapper secondary_{*sysroot_, *treehub_, GetParam()}; + UptaneRepoWrapper uptane_repo_; +}; + +std::shared_ptr SecondaryOstreeTest::treehub_{nullptr}; +std::string SecondaryOstreeTest::ostree_rootfs_template_{"./build/ostree_repo"}; +std::shared_ptr SecondaryOstreeTest::sysroot_{nullptr}; +bool SecondaryOstreeTest::needs_reset_{false}; + +TEST_P(SecondaryOstreeTest, fullUptaneVerificationInvalidRevision) { + EXPECT_TRUE(secondary_->putMetadata(addTarget("invalid-revision")).isSuccess()); + EXPECT_FALSE(secondary_->downloadOstreeUpdate(getCredsToSend()).isSuccess()); +} + +TEST_P(SecondaryOstreeTest, fullUptaneVerificationInvalidHwID) { + EXPECT_FALSE(secondary_->putMetadata(addTarget("", "invalid-hardware-id", "")).isSuccess()); +} + +TEST_P(SecondaryOstreeTest, fullUptaneVerificationInvalidSerial) { + bool expected_result = false; + if (GetParam() == VerificationType::kTuf) { + // Serials aren't 
checked, so we won't notice the problem. + expected_result = true; + } + EXPECT_EQ(secondary_->putMetadata(addTarget("", "", "invalid-serial-id")).isSuccess(), expected_result); +} + +TEST_P(SecondaryOstreeTest, verifyUpdatePositive) { + // check the version reported in the manifest just after an initial boot + Uptane::Manifest manifest = secondary_->getManifest(); + EXPECT_TRUE(manifest.verifySignature(secondary_->publicKey())); + EXPECT_EQ(manifest.installedImageHash(), sysrootCurRevHash()); + + // send metadata and do their full Uptane verification + EXPECT_TRUE(secondary_->putMetadata(addDefaultTarget()).isSuccess()); + + // emulate reboot to make sure that we can continue with an update installation after reboot + secondary_.reboot(); + + EXPECT_TRUE(secondary_->downloadOstreeUpdate(getCredsToSend()).isSuccess()); + EXPECT_EQ(secondary_->install().result_code.num_code, data::ResultCode::Numeric::kNeedCompletion); + + // check if the update was installed and pending + EXPECT_TRUE(secondary_.getPendingVersion().MatchHash(treehubCurRevHash())); + // manifest should still report the old version + manifest = secondary_->getManifest(); + EXPECT_TRUE(manifest.verifySignature(secondary_->publicKey())); + EXPECT_EQ(manifest.installedImageHash(), sysrootCurRevHash()); + + // emulate reboot + sysroot_->setNewDeploymentRev(treehubCurRev()); + secondary_.reboot(); + + // check if the version in the DB and reported in the manifest matches with the installed and applied one + EXPECT_FALSE(secondary_.getPendingVersion().IsValid()); + EXPECT_TRUE(secondary_.getCurrentVersion().MatchHash(treehubCurRevHash())); + manifest = secondary_->getManifest(); + EXPECT_TRUE(manifest.verifySignature(secondary_->publicKey())); + EXPECT_EQ(manifest.installedImageHash(), treehubCurRevHash()); + + // emulate reboot + // check if the installed version persists after a reboot + secondary_.reboot(); + EXPECT_FALSE(secondary_.getPendingVersion().IsValid()); + 
EXPECT_TRUE(secondary_.getCurrentVersion().MatchHash(treehubCurRevHash())); + manifest = secondary_->getManifest(); + EXPECT_TRUE(manifest.verifySignature(secondary_->publicKey())); + EXPECT_EQ(manifest.installedImageHash(), treehubCurRevHash()); + + // The next run will require the OSTree deployment to be reset. + needs_reset_ = true; +} + +INSTANTIATE_TEST_SUITE_P(SecondaryTestVerificationType, SecondaryOstreeTest, + ::testing::Values(VerificationType::kFull, VerificationType::kTuf)); + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + if (argc != 2) { + std::cerr << "Error: " << argv[0] << " \n"; + return EXIT_FAILURE; + } + + SecondaryOstreeTest::setOstreeRootfsTemplate(argv[1]); + + logger_init(); + logger_set_threshold(boost::log::trivial::info); + + return RUN_ALL_TESTS(); +} + +extern "C" OstreeDeployment* ostree_sysroot_get_booted_deployment(OstreeSysroot* ostree_sysroot) { + return SecondaryOstreeTest::curOstreeDeployment(ostree_sysroot); +} + +extern "C" const char* ostree_deployment_get_csum(OstreeDeployment* ostree_deployment) { + return SecondaryOstreeTest::curOstreeRootfsRev(ostree_deployment); +} diff --git a/src/aktualizr_secondary/aktualizr_secondary_test.cc b/src/aktualizr_secondary/aktualizr_secondary_test.cc new file mode 100644 index 0000000000..7094e019f3 --- /dev/null +++ b/src/aktualizr_secondary/aktualizr_secondary_test.cc @@ -0,0 +1,531 @@ +#include +#include + +#include +#include + +#include "aktualizr_secondary_file.h" +#include "crypto/keymanager.h" +#include "libaktualizr/types.h" +#include "storage/invstorage.h" +#include "update_agent_file.h" +#include "uptane_repo.h" +#include "utilities/utils.h" + +using ::testing::NiceMock; + +class UpdateAgentMock : public FileUpdateAgent { + public: + UpdateAgentMock(boost::filesystem::path target_filepath, std::string target_name) + : FileUpdateAgent(std::move(target_filepath), std::move(target_name)) { + ON_CALL(*this, receiveData).WillByDefault([this](const 
Uptane::Target& target, const uint8_t* data, size_t size) { + return FileUpdateAgent::receiveData(target, data, size); + }); + ON_CALL(*this, install).WillByDefault([this](const Uptane::Target& target) { + return FileUpdateAgent::install(target); + }); + } + + MOCK_METHOD(data::InstallationResult, receiveData, (const Uptane::Target& target, const uint8_t* data, size_t size)); + MOCK_METHOD(data::InstallationResult, install, (const Uptane::Target& target)); +}; + +class AktualizrSecondaryWrapper { + public: + AktualizrSecondaryWrapper(VerificationType verification_type) { + AktualizrSecondaryConfig config; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.uptane.verification_type = verification_type; + config.storage.path = storage_dir_.Path(); + config.storage.type = StorageType::kSqlite; + + storage_ = INvStorage::newStorage(config.storage); + + update_agent_ = std::make_shared>(config.storage.path / "firmware.txt", ""); + + secondary_ = std::make_shared(config, storage_, update_agent_); + secondary_->initialize(); + } + + std::shared_ptr& operator->() { return secondary_; } + + Uptane::Target getPendingVersion() const { + boost::optional pending_target; + + storage_->loadInstalledVersions(secondary_->serial().ToString(), nullptr, &pending_target); + return *pending_target; + } + + std::string hardwareID() const { return secondary_->hwID().ToString(); } + + std::string serial() const { return secondary_->serial().ToString(); } + + boost::filesystem::path targetFilepath() const { + return storage_dir_.Path() / AktualizrSecondaryFile::FileUpdateDefaultFile; + } + + std::shared_ptr> update_agent_; + + private: + TemporaryDirectory storage_dir_; + std::shared_ptr secondary_; + std::shared_ptr storage_; +}; + +class UptaneRepoWrapper { + public: + UptaneRepoWrapper() { uptane_repo_.generateRepo(KeyType::kED25519); } + + Uptane::SecondaryMetadata addImageFile(const std::string& targetname, const std::string& hardware_id, + const std::string& serial, size_t size = 
2049, bool add_and_sign_target = true, + bool add_invalid_images = false, size_t delta = 2) { + const auto image_file_path = root_dir_ / targetname; + generateRandomFile(image_file_path, size); + + uptane_repo_.addImage(image_file_path, targetname, hardware_id); + if (add_and_sign_target) { + uptane_repo_.addTarget(targetname, hardware_id, serial); + uptane_repo_.signTargets(); + } + + if (add_and_sign_target && add_invalid_images) { + const auto smaller_image_file_path = image_file_path.string() + ".smaller"; + const auto bigger_image_file_path = image_file_path.string() + ".bigger"; + const auto broken_image_file_path = image_file_path.string() + ".broken"; + + boost::filesystem::copy(image_file_path, smaller_image_file_path); + boost::filesystem::copy(image_file_path, bigger_image_file_path); + boost::filesystem::copy(image_file_path, broken_image_file_path); + + if (!boost::filesystem::exists(smaller_image_file_path)) { + LOG_ERROR << "File does not exists: " << smaller_image_file_path; + } + + boost::filesystem::resize_file(smaller_image_file_path, size - delta); + boost::filesystem::resize_file(bigger_image_file_path, size + delta); + + std::ofstream broken_image{broken_image_file_path, + std::ios_base::in | std::ios_base::out | std::ios_base::ate | std::ios_base::binary}; + unsigned char data_to_inject[]{0xFF}; + broken_image.seekp(static_cast(-sizeof(data_to_inject)), std::ios_base::end); + broken_image.write(reinterpret_cast(data_to_inject), sizeof(data_to_inject)); + broken_image.close(); + } + + return Uptane::SecondaryMetadata(getCurrentMetadata()); + } + + void addCustomImageMetadata(const std::string& targetname, const std::string& hardware_id, + const std::string& custom_version) { + auto custom = Json::Value(); + custom["targetFormat"] = "BINARY"; + custom["version"] = custom_version; + // Don't use the custom_version since it only allows integers and we want to + // be able to put garbage there. 
+ uptane_repo_.addCustomImage(targetname, Hash(Hash::Type::kSha256, targetname), 1, hardware_id, "", 0, Delegation(), + custom); + } + + Uptane::MetaBundle getCurrentMetadata() const { + Uptane::MetaBundle meta_bundle; + std::string metadata; + + boost::filesystem::load_string_file(director_dir_ / "root.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), std::move(metadata)); + boost::filesystem::load_string_file(director_dir_ / "targets.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), + std::move(metadata)); + + boost::filesystem::load_string_file(imagerepo_dir_ / "root.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), std::move(metadata)); + boost::filesystem::load_string_file(imagerepo_dir_ / "timestamp.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), + std::move(metadata)); + boost::filesystem::load_string_file(imagerepo_dir_ / "snapshot.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), std::move(metadata)); + boost::filesystem::load_string_file(imagerepo_dir_ / "targets.json", metadata); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), std::move(metadata)); + + return meta_bundle; + } + + std::string getTargetImagePath(const std::string& targetname) const { return (root_dir_ / targetname).string(); } + + void refreshRoot(Uptane::RepositoryType repo) { uptane_repo_.refresh(repo, Uptane::Role::Root()); } + + private: + static void generateRandomFile(const boost::filesystem::path& filepath, size_t size) { + std::ofstream file{filepath.string(), std::ofstream::binary}; + + if (!file.is_open() || !file.good()) { + throw std::runtime_error("Failed to create a file: " + filepath.string()); + } + + const 
unsigned char symbols[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv"; + unsigned char cur_symbol; + + for (unsigned int ii = 0; ii < size; ++ii) { + cur_symbol = symbols[static_cast(rand()) % sizeof(symbols)]; + file.put(static_cast(cur_symbol)); + } + + file.close(); + } + + private: + TemporaryDirectory root_dir_; + boost::filesystem::path director_dir_{root_dir_ / "repo/director"}; + boost::filesystem::path imagerepo_dir_{root_dir_ / "repo/repo"}; + UptaneRepo uptane_repo_{root_dir_.Path(), "", ""}; + Uptane::DirectorRepository director_repo_; +}; + +class SecondaryTest : public ::testing::Test { + public: + SecondaryTest(VerificationType verification_type = VerificationType::kFull, bool default_target = true) + : secondary_(verification_type), update_agent_(*(secondary_.update_agent_)) { + if (default_target) { + uptane_repo_.addImageFile(default_target_, secondary_->hwID().ToString(), secondary_->serial().ToString(), + target_size, true, true, inavlid_target_size_delta); + } + } + + private: + std::vector getCurrentTargets() { + auto targets = Uptane::Targets(Utils::parseJSON(getMetaFromBundle( + uptane_repo_.getCurrentMetadata(), Uptane::RepositoryType::Director(), Uptane::Role::Targets()))); + return targets.getTargets(secondary_->serial(), secondary_->hwID()); + } + + Uptane::Target getDefaultTarget() { + auto targets = getCurrentTargets(); + EXPECT_GT(targets.size(), 0); + return targets[0]; + } + + Hash getDefaultTargetHash() { return Hash(Hash::Type::kSha256, getDefaultTarget().sha256Hash()); } + + protected: + data::ResultCode::Numeric sendImageFile(std::string target_name = default_target_) { + auto image_path = uptane_repo_.getTargetImagePath(target_name); + size_t total_size = boost::filesystem::file_size(image_path); + + std::ifstream file{image_path}; + + uint8_t buf[send_buffer_size]; + size_t read_and_send_data_size = 0; + + while (read_and_send_data_size < total_size) { + auto read_bytes = file.readsome(reinterpret_cast(buf), 
sizeof(buf)); + if (read_bytes < 0) { + file.close(); + return data::ResultCode::Numeric::kGeneralError; + } + + auto result = secondary_->receiveData(buf, static_cast(read_bytes)); + if (!result.isSuccess()) { + file.close(); + return result.result_code.num_code; + } + read_and_send_data_size += static_cast(read_bytes); + } + + file.close(); + + data::ResultCode::Numeric result{data::ResultCode::Numeric::kGeneralError}; + if (read_and_send_data_size == total_size) { + result = data::ResultCode::Numeric::kOk; + } + + return result; + } + + void verifyTargetAndManifest() { + // check if a file was actually updated + ASSERT_TRUE(boost::filesystem::exists(secondary_.targetFilepath())); + auto target = getDefaultTarget(); + + // check the updated file hash + auto target_hash = Hash(Hash::Type::kSha256, target.sha256Hash()); + auto target_file_hash = Hash::generate(Hash::Type::kSha256, Utils::readFile(secondary_.targetFilepath())); + EXPECT_EQ(target_hash, target_file_hash); + + // check the secondary manifest + auto manifest = secondary_->getManifest(); + EXPECT_EQ(manifest.installedImageHash(), target_file_hash); + EXPECT_EQ(manifest.filepath(), target.filename()); + } + + static constexpr const char* const default_target_{"default-target"}; + static constexpr const char* const bigger_target_{"default-target.bigger"}; + static constexpr const char* const smaller_target_{"default-target.smaller"}; + static constexpr const char* const broken_target_{"default-target.broken"}; + + static const size_t target_size{2049}; + static const size_t inavlid_target_size_delta{2}; + static const size_t send_buffer_size{1024}; + + AktualizrSecondaryWrapper secondary_; + UptaneRepoWrapper uptane_repo_; + NiceMock& update_agent_; + TemporaryDirectory image_dir_; +}; + +class SecondaryTestNegative + : public SecondaryTest, + public ::testing::WithParamInterface> { + public: + SecondaryTestNegative() : SecondaryTest(std::get<2>(GetParam())), success_expected_(std::get<3>(GetParam())) {} 
+ + protected: + class MetadataInvalidator : public Uptane::SecondaryMetadata { + public: + MetadataInvalidator(const Uptane::MetaBundle& valid_metadata, const Uptane::RepositoryType& repo, + const Uptane::Role& role) + : Uptane::SecondaryMetadata(valid_metadata), repo_type_(repo), role_(role) {} + + void getRoleMetadata(std::string* result, const Uptane::RepositoryType& repo, const Uptane::Role& role, + Uptane::Version version) const override { + Uptane::SecondaryMetadata::getRoleMetadata(result, repo, role, version); + if (!(repo_type_ == repo && role_ == role)) { + return; + } + (*result)[10] = 'f'; + } + + private: + Uptane::RepositoryType repo_type_; + Uptane::Role role_; + }; + + MetadataInvalidator currentMetadata() const { + return MetadataInvalidator(uptane_repo_.getCurrentMetadata(), std::get<0>(GetParam()), std::get<1>(GetParam())); + } + + bool success_expected_; +}; + +/** + * This test is parameterized to control which metadata to malform. See + * INSTANTIATE_TEST_SUITE_P for the list of test instantiations with concrete + * parameter values. + */ +TEST_P(SecondaryTestNegative, MalformedMetadaJson) { + data::ResultCode::Numeric result{data::ResultCode::Numeric::kGeneralError}; + if (!success_expected_) { + EXPECT_CALL(update_agent_, receiveData).Times(0); + EXPECT_CALL(update_agent_, install).Times(0); + } else { + EXPECT_CALL(update_agent_, receiveData) + .Times(target_size / send_buffer_size + (target_size % send_buffer_size ? 1 : 0)); + EXPECT_CALL(update_agent_, install).Times(1); + result = data::ResultCode::Numeric::kOk; + } + + EXPECT_EQ(secondary_->putMetadata(currentMetadata()).isSuccess(), success_expected_); + ASSERT_EQ(sendImageFile(), result); + EXPECT_EQ(secondary_->install().isSuccess(), success_expected_); + + if (success_expected_) { + verifyTargetAndManifest(); + } +} + +/** + * Instantiates the parameterized test for each specified value of + * std::tuple. + * The parameter value indicates which metadata to malform. 
Anything that + * expects success (true) can be considered something like a failure to detect + * an attack. + */ +INSTANTIATE_TEST_SUITE_P( + SecondaryTestMalformedMetadata, SecondaryTestNegative, + ::testing::Values( + std::make_tuple(Uptane::RepositoryType::Director(), Uptane::Role::Root(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Director(), Uptane::Role::Targets(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Root(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Targets(), VerificationType::kFull, false), + std::make_tuple(Uptane::RepositoryType::Director(), Uptane::Role::Root(), VerificationType::kTuf, true), + std::make_tuple(Uptane::RepositoryType::Director(), Uptane::Role::Targets(), VerificationType::kTuf, true), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Root(), VerificationType::kTuf, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp(), VerificationType::kTuf, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot(), VerificationType::kTuf, false), + std::make_tuple(Uptane::RepositoryType::Image(), Uptane::Role::Targets(), VerificationType::kTuf, false))); + +class SecondaryTestVerification : public SecondaryTest, public ::testing::WithParamInterface { + public: + SecondaryTestVerification() : SecondaryTest(GetParam()){}; +}; + +/** + * This test is parameterized with VerificationType to indicate what level of + * metadata verification to perform. See INSTANTIATE_TEST_SUITE_P for the list + * of test instantiations with concrete parameter values. 
+ */ +TEST_P(SecondaryTestVerification, VerificationPositive) { + EXPECT_CALL(update_agent_, receiveData) + .Times(target_size / send_buffer_size + (target_size % send_buffer_size ? 1 : 0)); + EXPECT_CALL(update_agent_, install).Times(1); + + ASSERT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); + ASSERT_EQ(sendImageFile(), data::ResultCode::Numeric::kOk); + ASSERT_TRUE(secondary_->install().isSuccess()); + + verifyTargetAndManifest(); +} + +/** + * Instantiates the parameterized test for each specified value of VerificationType. + */ +INSTANTIATE_TEST_SUITE_P(SecondaryTestVerificationType, SecondaryTestVerification, + ::testing::Values(VerificationType::kFull, VerificationType::kTuf)); + +TEST_F(SecondaryTest, TwoImagesAndOneTarget) { + // two images for the same ECU, just one of them is added as a target and signed + // default image and corresponding target has been already added, just add another image + auto metadata = uptane_repo_.addImageFile("second_image_00", secondary_->hwID().ToString(), + secondary_->serial().ToString(), target_size, false, false); + EXPECT_TRUE(secondary_->putMetadata(metadata).isSuccess()); +} + +TEST_F(SecondaryTest, IncorrectTargetQuantity) { + const std::string hwid{secondary_->hwID().ToString()}; + const std::string serial{secondary_->serial().ToString()}; + { + // two targets for the same ECU + auto metadata = uptane_repo_.addImageFile("second_target", hwid, serial); + EXPECT_FALSE(secondary_->putMetadata(metadata).isSuccess()); + } + + { + // zero targets for the ECU being tested + auto metadata = uptane_repo_.addImageFile("mytarget", hwid, "non-existing-serial"); + EXPECT_FALSE(secondary_->putMetadata(metadata).isSuccess()); + } + + { + // zero targets for the ECU being tested + auto metadata = uptane_repo_.addImageFile("mytarget", "non-existig-hwid", serial); + EXPECT_FALSE(secondary_->putMetadata(metadata).isSuccess()); + } +} + +TEST_F(SecondaryTest, DirectorRootVersionIncremented) { + 
uptane_repo_.refreshRoot(Uptane::RepositoryType::Director()); + EXPECT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); +} + +TEST_F(SecondaryTest, ImageRootVersionIncremented) { + uptane_repo_.refreshRoot(Uptane::RepositoryType::Image()); + EXPECT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); +} + +TEST_F(SecondaryTest, SmallerImageFileSize) { + EXPECT_CALL(update_agent_, receiveData) + .Times((target_size - inavlid_target_size_delta) / send_buffer_size + + ((target_size - inavlid_target_size_delta) % send_buffer_size ? 1 : 0)); + EXPECT_CALL(update_agent_, install).Times(1); + + EXPECT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); + + EXPECT_EQ(sendImageFile(smaller_target_), data::ResultCode::Numeric::kOk); + EXPECT_FALSE(secondary_->install().isSuccess()); +} + +TEST_F(SecondaryTest, BiggerImageFileSize) { + EXPECT_CALL(update_agent_, receiveData) + .Times((target_size + inavlid_target_size_delta) / send_buffer_size + + ((target_size + inavlid_target_size_delta) % send_buffer_size ? 1 : 0)); + EXPECT_CALL(update_agent_, install).Times(1); + + EXPECT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); + + EXPECT_EQ(sendImageFile(bigger_target_), data::ResultCode::Numeric::kOk); + EXPECT_FALSE(secondary_->install().isSuccess()); +} + +TEST_F(SecondaryTest, InvalidImageData) { + EXPECT_CALL(update_agent_, receiveData) + .Times(target_size / send_buffer_size + (target_size % send_buffer_size ? 
1 : 0)); + EXPECT_CALL(update_agent_, install).Times(1); + + EXPECT_TRUE(secondary_->putMetadata(uptane_repo_.getCurrentMetadata()).isSuccess()); + EXPECT_EQ(sendImageFile(broken_target_), data::ResultCode::Numeric::kOk); + EXPECT_FALSE(secondary_->install().isSuccess()); +} + +class SecondaryTestTuf + : public SecondaryTest, + public ::testing::WithParamInterface, boost::optional>> { + public: + // No default Targets so as to be able to more thoroughly test the Target + // comparison. + SecondaryTestTuf() : SecondaryTest(VerificationType::kTuf, false){}; +}; + +/** + * This test is parameterized with a series of Targets with custom versions and + * which one should be considered the latest, if any. See + * INSTANTIATE_TEST_SUITE_P for the list of test instantiations with concrete + * parameter values. + */ +TEST_P(SecondaryTestTuf, TufVersions) { + const std::string hwid{secondary_->hwID().ToString()}; + { + int counter = 0; + for (const auto& version : GetParam().first) { + // Add counter so we can add multiple Targets with the same version. + uptane_repo_.addCustomImageMetadata("v" + version + "-" + std::to_string(++counter), hwid, version); + } + auto metadata = uptane_repo_.getCurrentMetadata(); + auto expected = GetParam().second; + EXPECT_EQ(secondary_->putMetadata(metadata).isSuccess(), !!expected); + if (!!expected) { + EXPECT_EQ(secondary_->getPendingTarget().custom_version(), expected); + // Ignore the initial "v" and the counter suffix. + EXPECT_EQ(secondary_->getPendingTarget().filename().compare(1, expected->size(), expected.get()), 0); + } + } +} + +/** + * Instantiates the parameterized test for each specified value of + * std::pair, boost::optional>>. + * The first parameter value is a list of Targets with custom versions and the + * second paramter is which one should be considered the latest, if any. 
+ */ +INSTANTIATE_TEST_SUITE_P(SecondaryTestTufVersions, SecondaryTestTuf, + ::testing::Values(std::make_pair(std::vector{"1"}, "1"), + std::make_pair(std::vector{"1", "2"}, "2"), + std::make_pair(std::vector{"1", "2", "3"}, "3"), + std::make_pair(std::vector{"3", "2", "1"}, "3"), + std::make_pair(std::vector{"2", "3", "1"}, "3"), + std::make_pair(std::vector{"invalid", "1"}, "1"), + std::make_pair(std::vector{"1", "invalid"}, "1"), + std::make_pair(std::vector{"invalid", "1", "2"}, "2"), + std::make_pair(std::vector{"1", "2", "invalid"}, "2"), + std::make_pair(std::vector{"1", "invalid", "2"}, "2"), + std::make_pair(std::vector{"1", "invalid1", "invalid2"}, "1"), + std::make_pair(std::vector{"invalid1", "1", "invalid2"}, "1"), + std::make_pair(std::vector{"invalid1", "invalid2", "1"}, "1"), + std::make_pair(std::vector{"1", "1", "2"}, "2"), + std::make_pair(std::vector{"2", "1", "1"}, "2"), + std::make_pair(std::vector{"1", "2", "1"}, "2"), + std::make_pair(std::vector{"1", "2", "2"}, boost::none), + std::make_pair(std::vector{"2", "2", "1"}, boost::none), + std::make_pair(std::vector{"2", "1", "2"}, boost::none), + std::make_pair(std::vector{""}, ""), + std::make_pair(std::vector{"text"}, "text"), + std::make_pair(std::vector{"invalid1", "invalid2"}, + boost::none))); + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::info); + + return RUN_ALL_TESTS(); +} diff --git a/src/aktualizr_secondary/main.cc b/src/aktualizr_secondary/main.cc index 957e6660d3..2b84588f31 100644 --- a/src/aktualizr_secondary/main.cc +++ b/src/aktualizr_secondary/main.cc @@ -9,7 +9,12 @@ #include "utilities/aktualizr_version.h" #include "utilities/utils.h" +#include "aktualizr_secondary_file.h" #include "logging/logging.h" +#include "secondary_tcp_server.h" +#ifdef BUILD_OSTREE +#include "aktualizr_secondary_ostree.h" +#endif namespace bpo = boost::program_options; @@ -24,7 +29,7 @@ void 
check_secondary_options(const bpo::options_description &description, const } } -bpo::variables_map parse_options(int argc, char *argv[]) { +bpo::variables_map parse_options(int argc, char **argv) { bpo::options_description description("aktualizr-secondary command line options"); // clang-format off description.add_options() @@ -33,8 +38,8 @@ bpo::variables_map parse_options(int argc, char *argv[]) { ("loglevel", bpo::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("config,c", bpo::value >()->composing(), "configuration file or directory") ("server-port,p", bpo::value(), "command server listening port") - ("ecu-serial", bpo::value(), "serial number of secondary ecu") - ("ecu-hardware-id", bpo::value(), "hardware ID of secondary ecu"); + ("ecu-serial", bpo::value(), "serial number of Secondary ECU") + ("ecu-hardware-id", bpo::value(), "hardware ID of Secondary ECU"); // clang-format on bpo::variables_map vm; @@ -76,22 +81,43 @@ bpo::variables_map parse_options(int argc, char *argv[]) { int main(int argc, char *argv[]) { logger_init(); logger_set_threshold(boost::log::trivial::info); - LOG_INFO << "Aktualizr-secondary version " << aktualizr_version() << " starting"; + LOG_INFO << "aktualizr-secondary version " << aktualizr_version() << " starting"; bpo::variables_map commandline_map = parse_options(argc, argv); int ret = EXIT_SUCCESS; try { AktualizrSecondaryConfig config(commandline_map); - LOG_DEBUG << "Current directory: " << boost::filesystem::current_path().string(); + AktualizrSecondary::Ptr secondary; - // storage (share class with primary) - std::shared_ptr storage = INvStorage::newStorage(config.storage); - std::unique_ptr secondary; - secondary = std_::make_unique(config, storage); - secondary->run(); + if (config.pacman.type != PACKAGE_MANAGER_OSTREE) { + secondary = std::make_shared(config); + } +#ifdef BUILD_OSTREE + else { + secondary = std::make_shared(config); + } +#else + else { + LOG_ERROR << "Unsupported type of Secondary: 
" << config.pacman.type; + } +#endif // BUILD_OSTREE + + if (!secondary) { + throw std::runtime_error("Failed to create IP Secondary of the specified type: " + config.pacman.type); + } + secondary->initialize(); + + SecondaryTcpServer tcp_server(*secondary, config.network.primary_ip, config.network.primary_port, + config.network.port, config.uptane.force_install_completion); + + tcp_server.run(); + + if (tcp_server.exit_reason() == SecondaryTcpServer::ExitReason::kRebootNeeded) { + secondary->completeInstall(); + } - } catch (std::runtime_error &exc) { + } catch (std::exception &exc) { LOG_ERROR << "Error: " << exc.what(); ret = EXIT_FAILURE; } diff --git a/src/aktualizr_secondary/msg_handler.cc b/src/aktualizr_secondary/msg_handler.cc new file mode 100644 index 0000000000..11e2d86b15 --- /dev/null +++ b/src/aktualizr_secondary/msg_handler.cc @@ -0,0 +1,26 @@ +#include "msg_handler.h" + +#include "logging/logging.h" + +void MsgDispatcher::clearHandlers() { handler_map_.clear(); } + +void MsgDispatcher::registerHandler(AKIpUptaneMes_PR msg_id, Handler handler) { + handler_map_[msg_id] = std::move(handler); +} + +MsgHandler::ReturnCode MsgDispatcher::handleMsg(const Asn1Message::Ptr& in_msg, Asn1Message::Ptr& out_msg) { + auto find_res_it = handler_map_.find(in_msg->present()); + if (find_res_it == handler_map_.end()) { + return MsgHandler::kUnkownMsg; + } + LOG_TRACE << "Found a handler for the request, processing it..."; + auto handle_status_code = find_res_it->second(*in_msg, *out_msg); + LOG_TRACE << "Request handler returned a response: " << out_msg->toStr(); + + // Track the last message to help cut down on repetitive logging. Ignore the + // version messages since they just get in the way. 
+ if (in_msg->present() != AKIpUptaneMes_PR_versionReq) { + last_msg_ = in_msg->present(); + } + return handle_status_code; +} diff --git a/src/aktualizr_secondary/msg_handler.h b/src/aktualizr_secondary/msg_handler.h new file mode 100644 index 0000000000..11f18ef020 --- /dev/null +++ b/src/aktualizr_secondary/msg_handler.h @@ -0,0 +1,40 @@ +#ifndef MSG_HANDLER_H +#define MSG_HANDLER_H + +#include +#include + +#include "AKIpUptaneMes.h" +#include "asn1/asn1_message.h" + +class MsgHandler { + public: + enum ReturnCode { kUnkownMsg = -1, kOk, kRebootRequired }; + + MsgHandler() = default; + virtual ~MsgHandler() = default; + MsgHandler(const MsgHandler&) = delete; + MsgHandler(MsgHandler&&) = delete; + MsgHandler& operator=(const MsgHandler&) = delete; + MsgHandler& operator=(MsgHandler&&) = delete; + + virtual ReturnCode handleMsg(const Asn1Message::Ptr& in_msg, Asn1Message::Ptr& out_msg) = 0; +}; + +class MsgDispatcher : public MsgHandler { + public: + using Handler = std::function; + + void registerHandler(AKIpUptaneMes_PR msg_id, Handler handler); + ReturnCode handleMsg(const Asn1Message::Ptr& in_msg, Asn1Message::Ptr& out_msg) override; + + protected: + void clearHandlers(); + + unsigned int last_msg_ = 0; + + private: + std::unordered_map handler_map_; +}; + +#endif // MSG_HANDLER_H diff --git a/src/aktualizr_secondary/secondary_rpc_test.cc b/src/aktualizr_secondary/secondary_rpc_test.cc new file mode 100644 index 0000000000..2ca5961c4b --- /dev/null +++ b/src/aktualizr_secondary/secondary_rpc_test.cc @@ -0,0 +1,921 @@ +#include + +#include +#include +#include + +#include "crypto/crypto.h" +#include "ipuptanesecondary.h" +#include "libaktualizr/packagemanagerfactory.h" +#include "libaktualizr/packagemanagerinterface.h" +#include "logging/logging.h" +#include "msg_handler.h" +#include "primary/secondary_provider_builder.h" +#include "secondary_tcp_server.h" +#include "storage/invstorage.h" +#include "test_utils.h" + +enum class HandlerVersion { kV1, kV2, 
kV2Failure }; + +/* This class allows us to divert messages from the regular handlers in + * AktualizrSecondary to our own test functions. This lets us test only what was + * received by the Secondary but not how it was processed. + * + * It also has handlers for both the old/v1 and new/v2 versions of the RPC + * protocol, so this is how we prove that the Primary is still + * backwards-compatible with older/v1 Secondaries. */ +class SecondaryMock : public MsgDispatcher { + public: + SecondaryMock(const Uptane::EcuSerial& serial, const Uptane::HardwareIdentifier& hdw_id, const PublicKey& pub_key, + const Uptane::Manifest& manifest, VerificationType vtype, HandlerVersion handler_version) + : serial_(serial), + hdw_id_(hdw_id), + pub_key_(pub_key), + manifest_(manifest), + image_filepath_{image_dir_ / "image.bin"}, + hasher_{MultiPartHasher::create(Hash::Type::kSha256)}, + vtype_{vtype}, + handler_version_(handler_version) { + registerHandlers(); + } + + public: + const std::string verification_failure = "Expected verification test failure"; + const std::string upload_data_failure = "Expected data upload test failure"; + const std::string ostree_failure = "Expected OSTree download test failure"; + const std::string installation_failure = "Expected installation test failure"; + + const Uptane::EcuSerial& serial() const { return serial_; } + const Uptane::HardwareIdentifier& hwID() const { return hdw_id_; } + const PublicKey& publicKey() const { return pub_key_; } + const Uptane::Manifest& manifest() const { return manifest_; } + const Uptane::MetaBundle& metadata() const { return meta_bundle_; } + HandlerVersion handlerVersion() const { return handler_version_; } + void setHandlerVersion(HandlerVersion handler_version_in) { handler_version_ = handler_version_in; } + void registerHandlers() { + registerBaseHandlers(); + if (handler_version_ == HandlerVersion::kV1) { + registerV1Handlers(); + } else if (handler_version_ == HandlerVersion::kV2) { + registerV2Handlers(); + 
} else { + registerV2FailureHandlers(); + } + } + + void resetImageHash() const { hasher_->reset(); } + Hash getReceivedImageHash() const { return hasher_->getHash(); } + size_t getReceivedImageSize() const { return boost::filesystem::file_size(image_filepath_); } + + const std::string& getReceivedTlsCreds() const { return tls_creds_; } + + // Used by both protocol versions: + void registerBaseHandlers() { + registerHandler(AKIpUptaneMes_PR_getInfoReq, + std::bind(&SecondaryMock::getInfoHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_versionReq, + std::bind(&SecondaryMock::versionHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_manifestReq, + std::bind(&SecondaryMock::getManifestHdlr, this, std::placeholders::_1, std::placeholders::_2)); + } + + // Used by protocol v1 (deprecated, no longer implemented in production code) only: + void registerV1Handlers() { + registerHandler(AKIpUptaneMes_PR_putMetaReq, + std::bind(&SecondaryMock::putMetaHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_sendFirmwareReq, + std::bind(&SecondaryMock::sendFirmwareHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_installReq, + std::bind(&SecondaryMock::installHdlr, this, std::placeholders::_1, std::placeholders::_2)); + + // These didn't exist in v1 and should just simply fail. 
+ registerHandler(AKIpUptaneMes_PR_rootVerReq, + std::bind(&SecondaryMock::rootVerFailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_putRootReq, + std::bind(&SecondaryMock::putRootFailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + } + + // Used by protocol v2 (based on current aktualizr-secondary implementation) only: + void registerV2Handlers() { + registerHandler(AKIpUptaneMes_PR_putMetaReq2, + std::bind(&SecondaryMock::putMeta2Hdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_uploadDataReq, + std::bind(&SecondaryMock::uploadDataHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_downloadOstreeRevReq, + std::bind(&SecondaryMock::downloadOstreeRev, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_installReq, + std::bind(&SecondaryMock::install2Hdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_rootVerReq, + std::bind(&SecondaryMock::rootVerHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_putRootReq, + std::bind(&SecondaryMock::putRootHdlr, this, std::placeholders::_1, std::placeholders::_2)); + } + + // Procotol v2 handlers that fail in predictable ways. 
+ void registerV2FailureHandlers() { + registerHandler(AKIpUptaneMes_PR_putMetaReq2, + std::bind(&SecondaryMock::putMeta2FailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_uploadDataReq, std::bind(&SecondaryMock::uploadDataFailureHdlr, this, + std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_downloadOstreeRevReq, std::bind(&SecondaryMock::downloadOstreeRevFailure, this, + std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_installReq, + std::bind(&SecondaryMock::install2FailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_rootVerReq, + std::bind(&SecondaryMock::rootVerFailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + registerHandler(AKIpUptaneMes_PR_putRootReq, + std::bind(&SecondaryMock::putRootFailureHdlr, this, std::placeholders::_1, std::placeholders::_2)); + } + + private: + MsgHandler::ReturnCode getInfoHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + out_msg.present(AKIpUptaneMes_PR_getInfoResp); + auto info_resp = out_msg.getInfoResp(); + + SetString(&info_resp->ecuSerial, serial_.ToString()); + SetString(&info_resp->hwId, hdw_id_.ToString()); + info_resp->keyType = static_cast(pub_key_.Type()); + SetString(&info_resp->key, pub_key_.Value()); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode versionHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_versionResp).versionResp(); + if (handler_version_ == HandlerVersion::kV1) { + m->version = 1; + } else { + m->version = 2; + } + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode getManifestHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + out_msg.present(AKIpUptaneMes_PR_manifestResp); + auto manifest_resp = out_msg.manifestResp(); + manifest_resp->manifest.present = manifest_PR_json; + 
SetString(&manifest_resp->manifest.choice.json, Utils::jsonToStr(manifest())); + + return ReturnCode::kOk; + } + + // This is basically the old implemention from AktualizrSecondary. The v1 + // protocol never had TUF verification so it isn't accounted for here. + MsgHandler::ReturnCode putMetaHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + auto md = in_msg.putMetaReq(); + + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), + ToString(md->director.choice.json.root)); + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), + ToString(md->director.choice.json.targets)); + + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), + ToString(md->image.choice.json.root)); + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), + ToString(md->image.choice.json.timestamp)); + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), + ToString(md->image.choice.json.snapshot)); + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), + ToString(md->image.choice.json.targets)); + + out_msg.present(AKIpUptaneMes_PR_putMetaResp).putMetaResp()->result = AKInstallationResult_success; + + return ReturnCode::kOk; + } + + // This is annoyingly similar to AktualizrSecondary::putMetaHdlr(). 
+ MsgHandler::ReturnCode putMeta2Hdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + auto md = in_msg.putMetaReq2(); + Uptane::MetaBundle meta_bundle; + + EXPECT_EQ(md->directorRepo.present, directorRepo_PR_collection); + if (vtype_ == VerificationType::kFull) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + const int director_meta_count = md->directorRepo.choice.collection.list.count; + EXPECT_EQ(director_meta_count, 2); + for (int i = 0; i < director_meta_count; i++) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic, cppcoreguidelines-pro-type-union-access) + const AKMetaJson_t object = *md->directorRepo.choice.collection.list.array[i]; + const std::string role = ToString(object.role); + std::string json = ToString(object.json); + if (role == Uptane::Role::ROOT) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), + std::move(json)); + } else if (role == Uptane::Role::TARGETS) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), + std::move(json)); + } + } + } + + EXPECT_EQ(md->imageRepo.present, imageRepo_PR_collection); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + const int image_meta_count = md->imageRepo.choice.collection.list.count; + EXPECT_EQ(image_meta_count, 4); + for (int i = 0; i < image_meta_count; i++) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic, cppcoreguidelines-pro-type-union-access) + const AKMetaJson_t object = *md->imageRepo.choice.collection.list.array[i]; + const std::string role = ToString(object.role); + std::string json = ToString(object.json); + if (role == Uptane::Role::ROOT) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), std::move(json)); + } else if (role == Uptane::Role::TIMESTAMP) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), + std::move(json)); + } else if (role 
== Uptane::Role::SNAPSHOT) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), std::move(json)); + } else if (role == Uptane::Role::TARGETS) { + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), std::move(json)); + } + } + + data::InstallationResult result = putMetadata2(meta_bundle); + + auto m = out_msg.present(AKIpUptaneMes_PR_putMetaResp2).putMetaResp2(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode putMeta2FailureHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_putMetaResp2).putMetaResp2(); + m->result = static_cast(data::ResultCode::Numeric::kVerificationFailed); + SetString(&m->description, verification_failure); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode installHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + auto result = install(ToString(in_msg.installReq()->hash)).result_code.num_code; + + out_msg.present(AKIpUptaneMes_PR_installResp).installResp()->result = + static_cast(result); + + if (data::ResultCode::Numeric::kNeedCompletion == result) { + return ReturnCode::kRebootRequired; + } + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode install2Hdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + auto result = install(ToString(in_msg.installReq()->hash)); + + auto m = out_msg.present(AKIpUptaneMes_PR_installResp2).installResp2(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + if (data::ResultCode::Numeric::kNeedCompletion == result.result_code.num_code) { + return ReturnCode::kRebootRequired; + } + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode install2FailureHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = 
out_msg.present(AKIpUptaneMes_PR_installResp2).installResp2(); + m->result = static_cast(data::ResultCode::Numeric::kInstallFailed); + SetString(&m->description, installation_failure); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode uploadDataHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + if (in_msg.uploadDataReq()->data.size < 0) { + out_msg.present(AKIpUptaneMes_PR_uploadDataResp).uploadDataResp()->result = AKInstallationResult_failure; + return ReturnCode::kOk; + } + + size_t data_size = static_cast(in_msg.uploadDataReq()->data.size); + auto result = receiveImageData(in_msg.uploadDataReq()->data.buf, data_size); + + auto m = out_msg.present(AKIpUptaneMes_PR_uploadDataResp).uploadDataResp(); + m->result = static_cast(result.result_code.num_code); + SetString(&m->description, result.description); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode uploadDataFailureHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_uploadDataResp).uploadDataResp(); + m->result = static_cast(data::ResultCode::Numeric::kDownloadFailed); + SetString(&m->description, upload_data_failure); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode sendFirmwareHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + received_firmware_data_ = ToString(in_msg.sendFirmwareReq()->firmware); + out_msg.present(AKIpUptaneMes_PR_sendFirmwareResp).sendFirmwareResp()->result = AKInstallationResult_success; + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode downloadOstreeRev(Asn1Message& in_msg, Asn1Message& out_msg) { + tls_creds_ = ToString(in_msg.downloadOstreeRevReq()->tlsCred); + auto m = out_msg.present(AKIpUptaneMes_PR_downloadOstreeRevResp).downloadOstreeRevResp(); + m->result = static_cast(data::ResultCode::Numeric::kOk); + SetString(&m->description, ""); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode downloadOstreeRevFailure(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + 
auto m = out_msg.present(AKIpUptaneMes_PR_downloadOstreeRevResp).downloadOstreeRevResp(); + m->result = static_cast(data::ResultCode::Numeric::kDownloadFailed); + SetString(&m->description, ostree_failure); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode rootVerHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + // Note this shouldn't get called at all with Director metadata for TUF verification. + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_rootVerResp).versionResp(); + // Very hacky! + m->version = 1; + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode rootVerFailureHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_rootVerResp).rootVerResp(); + m->version = -1; + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode putRootHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + // Note this shouldn't get called at all with Director metadata for TUF verification. + auto pr = in_msg.putRootReq(); + if (pr->repotype == AKRepoType_director) { + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), + ToString(pr->json)); + } else if (pr->repotype == AKRepoType_image) { + meta_bundle_.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), ToString(pr->json)); + } + + auto m = out_msg.present(AKIpUptaneMes_PR_putRootResp).putRootResp(); + m->result = static_cast(data::ResultCode::Numeric::kOk); + SetString(&m->description, ""); + + return ReturnCode::kOk; + } + + MsgHandler::ReturnCode putRootFailureHdlr(Asn1Message& in_msg, Asn1Message& out_msg) { + (void)in_msg; + + auto m = out_msg.present(AKIpUptaneMes_PR_putRootResp).putRootResp(); + m->result = static_cast(data::ResultCode::Numeric::kVerificationFailed); + SetString(&m->description, verification_failure); + + return ReturnCode::kOk; + } + + data::InstallationResult putMetadata2(const Uptane::MetaBundle& meta_bundle) { + meta_bundle_ = meta_bundle; + return 
data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + + data::InstallationResult receiveImageData(const uint8_t* data, size_t size) { + std::ofstream target_file(image_filepath_.c_str(), std::ofstream::out | std::ofstream::binary | std::ofstream::app); + + target_file.write(reinterpret_cast(data), static_cast(size)); + hasher_->update(data, size); + + target_file.close(); + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + + data::InstallationResult install(const std::string& target_name) { + if (!received_firmware_data_.empty()) { + // data was received via the old request (sendFirmware) + if (target_name == "OSTREE") { + tls_creds_ = received_firmware_data_; + } else if (handler_version_ == HandlerVersion::kV1) { + // v2 calls this directly in uploadDataHdlr. + receiveImageData(reinterpret_cast(received_firmware_data_.c_str()), + received_firmware_data_.size()); + } + } + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + + private: + const Uptane::EcuSerial serial_; + const Uptane::HardwareIdentifier hdw_id_; + const PublicKey pub_key_; + const Uptane::Manifest manifest_; + + Uptane::MetaBundle meta_bundle_; + + TemporaryDirectory image_dir_; + boost::filesystem::path image_filepath_; + std::shared_ptr hasher_; + std::unordered_map handler_map_; + std::string tls_creds_; + std::string received_firmware_data_; + VerificationType vtype_; + HandlerVersion handler_version_; +}; + +class TargetFile { + public: + TargetFile(const std::string filename, size_t size = 1024, Hash::Type hash_type = Hash::Type::kSha256) + : image_size_{size}, + image_filepath_{target_dir_ / filename}, + image_hash_{generateRandomFile(image_filepath_, size, hash_type)} {} + + std::string path() const { return image_filepath_.string(); } + const Hash& hash() const { return image_hash_; } + const size_t& size() const { return image_size_; } + + static Hash generateRandomFile(const boost::filesystem::path& filepath, size_t size, 
Hash::Type hash_type) { + auto hasher = MultiPartHasher::create(hash_type); + std::ofstream file{filepath.string(), std::ofstream::binary}; + + if (!file.is_open() || !file.good()) { + throw std::runtime_error("Failed to create a file: " + filepath.string()); + } + + const unsigned char symbols[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv"; + unsigned char cur_symbol; + + for (unsigned int ii = 0; ii < size; ++ii) { + cur_symbol = symbols[static_cast(rand()) % sizeof(symbols)]; + file.put(static_cast(cur_symbol)); + hasher->update(&cur_symbol, sizeof(cur_symbol)); + } + + file.close(); + return hasher->getHash(); + } + + Uptane::Target createTarget(std::shared_ptr& package_manager_) { + Json::Value target_json; + target_json["custom"]["targetFormat"] = "BINARY"; + target_json["hashes"]["sha256"] = hash().HashString(); + target_json["length"] = size(); + Uptane::Target target = Uptane::Target(path(), target_json); + + auto fhandle = package_manager_->createTargetFile(target); + const std::string content = Utils::readFile(path()); + fhandle.write(const_cast(content.c_str()), static_cast(size())); + fhandle.close(); + + return target; + } + + private: + TemporaryDirectory target_dir_; + const size_t image_size_; + boost::filesystem::path image_filepath_; + Hash image_hash_; +}; + +class SecondaryRpcCommon : public ::testing::Test { + public: + const std::string ca_ = "ca"; + const std::string cert_ = "cert"; + const std::string pkey_ = "pkey"; + const std::string server_ = "ostree-server"; + const std::string director_root_ = "director-root"; + const std::string director_root_v2_ = "director-root-v2"; + const std::string director_targets_ = "director-targets"; + const std::string image_root_ = "image-root"; + const std::string image_root_v2_ = "image-root-v2"; + const std::string image_timestamp_ = "image-timestamp"; + const std::string image_snapshot_ = "image-snapshot"; + const std::string image_targets_ = "image-targets"; + + protected: + 
SecondaryRpcCommon(size_t image_size, HandlerVersion handler_version, VerificationType vtype) + : secondary_{Uptane::EcuSerial("serial"), + Uptane::HardwareIdentifier("hardware-id"), + PublicKey("pub-key", KeyType::kED25519), + Uptane::Manifest(), + vtype, + handler_version}, + secondary_server_{secondary_, "", 0}, + secondary_server_thread_{std::bind(&SecondaryRpcCommon::runSecondaryServer, this)}, + image_file_{"mytarget_image.img", image_size}, + vtype_{vtype} { + secondary_server_.wait_until_running(); + ip_secondary_ = Uptane::IpUptaneSecondary::connectAndCreate("localhost", secondary_server_.port(), vtype); + + config_.pacman.ostree_server = server_; + config_.pacman.type = PACKAGE_MANAGER_NONE; + config_.pacman.images_path = temp_dir_.Path() / "images"; + config_.storage.path = temp_dir_.Path(); + + storage_ = INvStorage::newStorage(config_.storage); + storage_->storeTlsCreds(ca_, cert_, pkey_); + storage_->storeRoot(director_root_, Uptane::RepositoryType::Director(), Uptane::Version(1)); + storage_->storeNonRoot(director_targets_, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); + storage_->storeRoot(image_root_, Uptane::RepositoryType::Image(), Uptane::Version(1)); + storage_->storeNonRoot(image_timestamp_, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); + storage_->storeNonRoot(image_snapshot_, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); + storage_->storeNonRoot(image_targets_, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); + + package_manager_ = PackageManagerFactory::makePackageManager(config_.pacman, config_.bootloader, storage_, nullptr); + secondary_provider_ = SecondaryProviderBuilder::Build(config_, storage_, package_manager_); + ip_secondary_->init(secondary_provider_); + } + + ~SecondaryRpcCommon() { + secondary_server_.stop(); + secondary_server_thread_.join(); + } + + void runSecondaryServer() { secondary_server_.run(); } + + void resetHandlers(HandlerVersion handler_version) { + 
secondary_.setHandlerVersion(handler_version); + secondary_.registerHandlers(); + } + + void verifyMetadata(const Uptane::MetaBundle& meta_bundle) { + if (vtype_ == VerificationType::kFull) { + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Root()), + latest_director_root_); + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Targets()), + director_targets_); + } + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Root()), + latest_image_root_); + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), + image_timestamp_); + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), + image_snapshot_); + EXPECT_EQ(Uptane::getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Targets()), + image_targets_); + } + + void sendAndInstallBinaryImage() { + Uptane::Target target = image_file_.createTarget(package_manager_); + const HandlerVersion handler_version = secondary_.handlerVersion(); + + data::InstallationResult result = ip_secondary_->putMetadata(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kVerificationFailed); + EXPECT_EQ(result.description, secondary_.verification_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + verifyMetadata(secondary_.metadata()); + } + + result = ip_secondary_->sendFirmware(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kDownloadFailed); + EXPECT_EQ(result.description, secondary_.upload_data_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + } + + result = ip_secondary_->install(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, 
data::ResultCode::Numeric::kInstallFailed); + EXPECT_EQ(result.description, secondary_.installation_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + EXPECT_EQ(image_file_.hash(), secondary_.getReceivedImageHash()); + } + } + + void installOstreeRev() { + Json::Value target_json; + target_json["custom"]["targetFormat"] = "OSTREE"; + Uptane::Target target = Uptane::Target("OSTREE", target_json); + + const HandlerVersion handler_version = secondary_.handlerVersion(); + + data::InstallationResult result = ip_secondary_->putMetadata(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kVerificationFailed); + EXPECT_EQ(result.description, secondary_.verification_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + verifyMetadata(secondary_.metadata()); + } + + result = ip_secondary_->sendFirmware(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kDownloadFailed); + EXPECT_EQ(result.description, secondary_.ostree_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + } + + result = ip_secondary_->install(target); + if (handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kInstallFailed); + EXPECT_EQ(result.description, secondary_.installation_failure); + } else { + EXPECT_TRUE(result.isSuccess()); + std::string archive = secondary_.getReceivedTlsCreds(); + { + std::stringstream as(archive); + EXPECT_EQ(ca_, Utils::readFileFromArchive(as, "ca.pem")); + } + { + std::stringstream as(archive); + EXPECT_EQ(cert_, Utils::readFileFromArchive(as, "client.pem")); + } + { + std::stringstream as(archive); + EXPECT_EQ(pkey_, Utils::readFileFromArchive(as, "pkey.pem")); + } + { + std::stringstream as(archive); + EXPECT_EQ(server_, Utils::readFileFromArchive(as, "server.url", true)); + } + } + } + + void rotateRoot() { + const HandlerVersion handler_version = 
secondary_.handlerVersion(); + + const int32_t droot_ver = ip_secondary_->getRootVersion(true); + const int32_t iroot_ver = ip_secondary_->getRootVersion(false); + if (handler_version == HandlerVersion::kV1 || handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(droot_ver, -1); + EXPECT_EQ(iroot_ver, -1); + } else { + if (vtype_ == VerificationType::kTuf) { + EXPECT_EQ(droot_ver, 0); + } else { + EXPECT_EQ(droot_ver, 1); + } + EXPECT_EQ(iroot_ver, 1); + } + + data::InstallationResult dresult = ip_secondary_->putRoot(director_root_v2_, true); + data::InstallationResult iresult = ip_secondary_->putRoot(image_root_v2_, false); + if (handler_version == HandlerVersion::kV1 || handler_version == HandlerVersion::kV2Failure) { + EXPECT_EQ(dresult.result_code, data::ResultCode::Numeric::kVerificationFailed); + EXPECT_EQ(dresult.description, secondary_.verification_failure); + EXPECT_EQ(iresult.result_code, data::ResultCode::Numeric::kVerificationFailed); + EXPECT_EQ(iresult.description, secondary_.verification_failure); + } else { + if (vtype_ == VerificationType::kTuf) { + EXPECT_EQ(dresult.result_code, data::ResultCode::Numeric::kOk); + EXPECT_EQ(dresult.description, + "Secondary serial uses TUF verification and thus does not require Director Root metadata."); + } else { + EXPECT_TRUE(dresult.isSuccess()); + EXPECT_EQ(dresult.description, ""); + } + EXPECT_TRUE(iresult.isSuccess()); + verifyMetadata(secondary_.metadata()); + } + } + + SecondaryMock secondary_; + std::shared_ptr secondary_provider_; + SecondaryTcpServer secondary_server_; + std::thread secondary_server_thread_; + TargetFile image_file_; + VerificationType vtype_; + SecondaryInterface::Ptr ip_secondary_; + TemporaryDirectory temp_dir_; + std::shared_ptr storage_; + Config config_; + std::shared_ptr package_manager_; + std::string latest_director_root_{director_root_}; + std::string latest_image_root_{image_root_}; +}; + +class SecondaryRpcTest : public SecondaryRpcCommon, + public 
::testing::WithParamInterface> { + protected: + SecondaryRpcTest() : SecondaryRpcCommon(std::get<0>(GetParam()), std::get<1>(GetParam()), std::get<2>(GetParam())) {} +}; + +// Test the serialization/deserialization and the TCP/IP communication implementation +// that occurs during communication between Primary and IP Secondary +TEST_P(SecondaryRpcTest, AllRpcCallsTest) { + ASSERT_TRUE(ip_secondary_ != nullptr) << "Failed to create IP Secondary"; + EXPECT_EQ(ip_secondary_->getSerial(), secondary_.serial()); + EXPECT_EQ(ip_secondary_->getHwId(), secondary_.hwID()); + EXPECT_EQ(ip_secondary_->getPublicKey(), secondary_.publicKey()); + EXPECT_EQ(ip_secondary_->getManifest(), secondary_.manifest()); + + sendAndInstallBinaryImage(); + + installOstreeRev(); + + rotateRoot(); +} + +/* These tests use a mock of most of the Secondary internals in order to test + * the RPC mechanism between the Primary and IP Secondary. The tests cover the + * old/v1/fallback handlers as well as the new/v2 versions. 
*/ +INSTANTIATE_TEST_SUITE_P(SecondaryRpcTestCases, SecondaryRpcTest, + ::testing::Values(std::make_tuple(1, HandlerVersion::kV2, VerificationType::kFull), + std::make_tuple(1024, HandlerVersion::kV2, VerificationType::kFull), + std::make_tuple(1024 - 1, HandlerVersion::kV2, VerificationType::kFull), + std::make_tuple(1024 + 1, HandlerVersion::kV2, VerificationType::kFull), + std::make_tuple(1024 * 10 + 1, HandlerVersion::kV2, VerificationType::kFull), + std::make_tuple(1, HandlerVersion::kV2, VerificationType::kTuf), + std::make_tuple(1024, HandlerVersion::kV2, VerificationType::kTuf), + std::make_tuple(1024 - 1, HandlerVersion::kV2, VerificationType::kTuf), + std::make_tuple(1024 + 1, HandlerVersion::kV2, VerificationType::kTuf), + std::make_tuple(1024 * 10 + 1, HandlerVersion::kV2, VerificationType::kTuf), + std::make_tuple(1, HandlerVersion::kV1, VerificationType::kFull), + std::make_tuple(1024, HandlerVersion::kV1, VerificationType::kFull), + std::make_tuple(1024 - 1, HandlerVersion::kV1, VerificationType::kFull), + std::make_tuple(1024 + 1, HandlerVersion::kV1, VerificationType::kFull), + std::make_tuple(1024 * 10 + 1, HandlerVersion::kV1, VerificationType::kFull), + std::make_tuple(1024, HandlerVersion::kV2Failure, VerificationType::kFull))); + +class SecondaryRpcUpgrade : public SecondaryRpcCommon { + protected: + SecondaryRpcUpgrade() : SecondaryRpcCommon(1024, HandlerVersion::kV1, VerificationType::kFull) {} +}; + +/* Test upgrade and downgrade of protocol versions after installation, both for + * binary and OSTree updates. 
*/ +TEST_F(SecondaryRpcUpgrade, VersionUpgrade) { + ASSERT_TRUE(ip_secondary_ != nullptr) << "Failed to create IP Secondary"; + + sendAndInstallBinaryImage(); + resetHandlers(HandlerVersion::kV2); + secondary_.resetImageHash(); + sendAndInstallBinaryImage(); + resetHandlers(HandlerVersion::kV1); + secondary_.resetImageHash(); + sendAndInstallBinaryImage(); + + resetHandlers(HandlerVersion::kV1); + installOstreeRev(); + resetHandlers(HandlerVersion::kV2); + installOstreeRev(); + resetHandlers(HandlerVersion::kV1); + installOstreeRev(); +} + +TEST(SecondaryTcpServer, TestIpSecondaryIfSecondaryIsNotRunning) { + in_port_t secondary_port = TestUtils::getFreePortAsInt(); + SecondaryInterface::Ptr ip_secondary; + + // Try to connect to a non-running Secondary and create a corresponding instance on Primary. + ip_secondary = Uptane::IpUptaneSecondary::connectAndCreate("localhost", secondary_port, VerificationType::kFull); + EXPECT_EQ(ip_secondary, nullptr); + + // Create Secondary on Primary without actually connecting to Secondary. + ip_secondary = std::make_shared( + "localhost", secondary_port, VerificationType::kFull, Uptane::EcuSerial("serial"), + Uptane::HardwareIdentifier("hwid"), PublicKey("key", KeyType::kED25519)); + + TemporaryDirectory temp_dir; + Config config; + config.storage.path = temp_dir.Path(); + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; + std::shared_ptr storage = INvStorage::newStorage(config.storage); + std::shared_ptr package_manager = + PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr); + std::shared_ptr secondary_provider = + SecondaryProviderBuilder::Build(config, storage, package_manager); + ip_secondary->init(secondary_provider); + + // Expect nothing since the Secondary is not running. 
+ EXPECT_EQ(ip_secondary->getManifest(), Json::Value()); + + TargetFile target_file("mytarget_image.img"); + Uptane::Target target = target_file.createTarget(package_manager); + + // Expect failures since the Secondary is not running. + EXPECT_EQ(ip_secondary->getRootVersion(true), 0); + EXPECT_EQ(ip_secondary->getRootVersion(false), 0); + EXPECT_FALSE(ip_secondary->putRoot("director-root-v2", true).isSuccess()); + EXPECT_FALSE(ip_secondary->putRoot("image-root-v2", false).isSuccess()); + EXPECT_FALSE(ip_secondary->putMetadata(target).isSuccess()); + EXPECT_FALSE(ip_secondary->sendFirmware(target).isSuccess()); + EXPECT_FALSE(ip_secondary->install(target).isSuccess()); +} + +/* This class returns a positive result for every message. The test cases verify + * that the implementation can recover from situations where something goes wrong. */ +class SecondaryRpcTestPositive : public ::testing::Test, public MsgHandler { + protected: + SecondaryRpcTestPositive() + : secondary_server_{*this, "", 0}, secondary_server_thread_{[&]() { secondary_server_.run(); }} { + secondary_server_.wait_until_running(); + } + + ~SecondaryRpcTestPositive() { + secondary_server_.stop(); + secondary_server_thread_.join(); + } + + // Override default implementation with a stub that always returns success. 
+ ReturnCode handleMsg(const Asn1Message::Ptr& in_msg, Asn1Message::Ptr& out_msg) override { + (void)in_msg; + out_msg->present(AKIpUptaneMes_PR_installResp).installResp()->result = AKInstallationResultCode_ok; + return ReturnCode::kOk; + } + + AKIpUptaneMes_PR sendInstallMsg() { + // compose and send a valid message + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_installReq); + + // prepare request message + auto req_mes = req->installReq(); + SetString(&req_mes->hash, "target_name"); + // send request and receive response, a request-response type of RPC + std::pair secondary_server_addr{"127.0.0.1", secondary_server_.port()}; + auto resp = Asn1Rpc(req, secondary_server_addr); + + return resp->present(); + } + + protected: + SecondaryTcpServer secondary_server_; + std::thread secondary_server_thread_; +}; + +/* This test fails because the Secondary TCP server implementation is a + * single-threaded, synchronous and blocking hence it cannot accept any new + * connections until the current one is closed. 
Therefore, if a client/Primary + * does not close its socket for some reason then Secondary becomes + * "unavailable" */ +// TEST_F(SecondaryRpcTestPositive, primaryNotClosingSocket) { +// ConnectionSocket con_sock{"127.0.0.1", secondary_server_.port()}; +// con_sock.connect(); +// ASSERT_EQ(sendInstallMsg(), AKIpUptaneMes_PR_installResp); +//} + +TEST_F(SecondaryRpcTestPositive, primaryConnectAndDisconnect) { + ConnectionSocket{"127.0.0.1", secondary_server_.port()}.connect(); + // do a valid request/response exchange to verify if Secondary works as expected + // after accepting and closing a new connection + ASSERT_EQ(sendInstallMsg(), AKIpUptaneMes_PR_installResp); +} + +TEST_F(SecondaryRpcTestPositive, primaryConnectAndSendValidButNotSupportedMsg) { + { + ConnectionSocket con_sock{"127.0.0.1", secondary_server_.port()}; + con_sock.connect(); + uint8_t garbage[] = {0x30, 0x13, 0x02, 0x01, 0x05, 0x16, 0x0e, 0x41, 0x6e, 0x79, 0x62, + 0x6f, 0x64, 0x79, 0x20, 0x74, 0x68, 0x65, 0x72, 0x65, 0x3f}; + send(*con_sock, garbage, sizeof(garbage), 0); + } + // do a valid request/response exchange to verify if Secondary works as expected + // after receiving not-supported message + ASSERT_EQ(sendInstallMsg(), AKIpUptaneMes_PR_installResp); +} + +TEST_F(SecondaryRpcTestPositive, primaryConnectAndSendBrokenAsn1Msg) { + { + ConnectionSocket con_sock{"127.0.0.1", secondary_server_.port()}; + con_sock.connect(); + uint8_t garbage[] = {0x30, 0x99, 0x02, 0x01, 0x05, 0x16, 0x0e, 0x41, 0x6e, 0x79, + 0x62, 0x6f, 0x64, 0x79, 0x20, 0x74, 0x68, 0x72, 0x65, 0x3f}; + send(*con_sock, garbage, sizeof(garbage), 0); + } + // do a valid request/response exchange to verify if Secondary works as expected + // after receiving broken ASN1 message + ASSERT_EQ(sendInstallMsg(), AKIpUptaneMes_PR_installResp); +} + +TEST_F(SecondaryRpcTestPositive, primaryConnectAndSendGarbage) { + { + ConnectionSocket con_sock{"127.0.0.1", secondary_server_.port()}; + con_sock.connect(); + uint8_t garbage[] = "some 
garbage message"; + send(*con_sock, garbage, sizeof(garbage), 0); + } + // do a valid request/response exchange to verify if Secondary works as expected + // after receiving some garbage + ASSERT_EQ(sendInstallMsg(), AKIpUptaneMes_PR_installResp); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::debug); + + return RUN_ALL_TESTS(); +} diff --git a/src/aktualizr_secondary/secondary_tcp_server.cc b/src/aktualizr_secondary/secondary_tcp_server.cc new file mode 100644 index 0000000000..9368671017 --- /dev/null +++ b/src/aktualizr_secondary/secondary_tcp_server.cc @@ -0,0 +1,195 @@ +#include "secondary_tcp_server.h" + +#include + +#include "AKInstallationResultCode.h" +#include "AKIpUptaneMes.h" +#include "asn1/asn1_message.h" +#include "logging/logging.h" +#include "msg_handler.h" +#include "utilities/dequeue_buffer.h" + +SecondaryTcpServer::SecondaryTcpServer(MsgHandler &msg_handler, const std::string &primary_ip, in_port_t primary_port, + in_port_t port, bool reboot_after_install) + : msg_handler_(msg_handler), + listen_socket_(port), + keep_running_(true), + reboot_after_install_(reboot_after_install), + is_running_(false) { + if (primary_ip.empty()) { + return; + } + + ConnectionSocket conn_socket(primary_ip, primary_port, listen_socket_.port()); + if (conn_socket.connect() == 0) { + LOG_INFO << "Connected to Primary, sending info about this Secondary."; + HandleOneConnection(*conn_socket); + } else { + LOG_INFO << "Failed to connect to Primary."; + } +} + +void SecondaryTcpServer::run() { + if (listen(*listen_socket_, SOMAXCONN) < 0) { + throw std::system_error(errno, std::system_category(), "listen"); + } + LOG_INFO << "Secondary TCP server listening on " << listen_socket_.ToString(); + + { + std::unique_lock lock(running_condition_mutex_); + is_running_ = true; + running_condition_.notify_all(); + } + + bool first_connection = true; + + while (keep_running_.load()) { + 
sockaddr_storage peer_sa{}; + socklen_t peer_sa_size = sizeof(sockaddr_storage); + + LOG_DEBUG << "Waiting for connection from Primary..."; + int con_fd = accept(*listen_socket_, reinterpret_cast(&peer_sa), &peer_sa_size); + if (con_fd == -1) { + // Accept can fail if a client closes connection/client socket before a TCP handshake completes or + // a network connection goes down in the middle of a TCP handshake procedure. At first glance it looks like + // we can just continue listening/accepting new connections in such cases instead of exiting from the server loop + // which leads to exiting of the overall daemon process. + // But, accept() failure, potentially can be caused by some incorrect state of the listening socket + // which means that it will keep returning error, so, exiting from the daemon process and letting + // systemd to restart it looks like the most reliable solution that covers all edge cases. + LOG_INFO << "Socket accept failed, aborting."; + break; + } + + if (first_connection) { + LOG_INFO << "Primary connected."; + first_connection = false; + } else { + LOG_DEBUG << "Primary reconnected."; + } + auto continue_running = HandleOneConnection(*Socket(con_fd)); + if (!continue_running) { + keep_running_.store(false); + } + LOG_DEBUG << "Primary disconnected."; + } + + { + std::unique_lock lock(running_condition_mutex_); + is_running_ = false; + running_condition_.notify_all(); + } + + LOG_INFO << "Secondary TCP server exiting."; +} + +void SecondaryTcpServer::stop() { + LOG_DEBUG << "Stopping Secondary TCP server..."; + keep_running_.store(false); + // unblock accept + ConnectionSocket("localhost", listen_socket_.port()).connect(); +} + +in_port_t SecondaryTcpServer::port() const { return listen_socket_.port(); } +SecondaryTcpServer::ExitReason SecondaryTcpServer::exit_reason() const { return exit_reason_; } + +static bool sendResponseMessage(int socket_fd, const Asn1Message::Ptr &resp_msg); + +bool SecondaryTcpServer::HandleOneConnection(int 
socket) { + // Outside the message loop, because one recv() may have parts of 2 messages + // Note that one recv() call returning 2+ messages doesn't work at the + // moment. This shouldn't be a problem until we have messages that aren't + // strictly request/response + DequeueBuffer buffer; + bool keep_running_server = true; + bool keep_running_current_session = true; + + while (keep_running_current_session) { // Keep reading until we get an error + // Read an incoming message + AKIpUptaneMes_t *m = nullptr; + asn_dec_rval_t res{}; + asn_codec_ctx_s context{}; + ssize_t received; + + do { + received = recv(socket, buffer.Tail(), buffer.TailSpace(), 0); + if (received < 0) { + LOG_ERROR << "Failed to read data from a server socket: " << strerror(errno); + break; + } + buffer.HaveEnqueued(static_cast(received)); + res = ber_decode(&context, &asn_DEF_AKIpUptaneMes, reinterpret_cast(&m), buffer.Head(), buffer.Size()); + buffer.Consume(res.consumed); + } while (res.code == RC_WMORE && received > 0); + // Note that ber_decode allocates *m even on failure, so this must always be done + Asn1Message::Ptr request_msg = Asn1Message::FromRaw(&m); + + if (received == 0) { + LOG_TRACE << "Primary has closed a connection socket"; + break; + } + + if (received < 0) { + LOG_ERROR << "Error while reading message data from a socket: " << strerror(errno); + break; + } + + if (res.code != RC_OK) { + LOG_ERROR << "Failed to decode a message received from Primary"; + break; + } + + LOG_DEBUG << "Received a request from Primary: " << request_msg->toStr(); + Asn1Message::Ptr response_msg = Asn1Message::Empty(); + MsgHandler::ReturnCode handle_status_code = msg_handler_.handleMsg(request_msg, response_msg); + + switch (handle_status_code) { + case MsgHandler::ReturnCode::kRebootRequired: { + exit_reason_ = ExitReason::kRebootNeeded; + keep_running_current_session = sendResponseMessage(socket, response_msg); + if (reboot_after_install_) { + keep_running_server = keep_running_current_session 
= false; + } + break; + } + case MsgHandler::ReturnCode::kOk: { + keep_running_current_session = sendResponseMessage(socket, response_msg); + break; + } + case MsgHandler::ReturnCode::kUnkownMsg: + default: { + // TODO: consider sending NOT_SUPPORTED/Unknown message and closing connection socket + keep_running_current_session = false; + LOG_INFO << "Unsupported message received from Primary: " << request_msg->toStr(); + } + } // switch + + } // Go back round and read another message + + return keep_running_server; + // Parse error => Shutdown the socket + // write error => Shutdown the socket + // Timeout on write => shutdown +} + +void SecondaryTcpServer::wait_until_running(int timeout) { + std::unique_lock lock(running_condition_mutex_); + running_condition_.wait_for(lock, std::chrono::seconds(timeout), [&] { return is_running_; }); +} + +bool sendResponseMessage(int socket_fd, const Asn1Message::Ptr &resp_msg) { + LOG_DEBUG << "Encoding and sending response message"; + + int optval = 0; + setsockopt(socket_fd, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(int)); + asn_enc_rval_t encode_result = der_encode(&asn_DEF_AKIpUptaneMes, &resp_msg->msg_, Asn1SocketWriteCallback, + reinterpret_cast(&socket_fd)); + if (encode_result.encoded == -1) { + LOG_ERROR << "Failed to encode a response message"; + return false; // write error + } + optval = 1; + setsockopt(socket_fd, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(int)); + + return true; +} diff --git a/src/aktualizr_secondary/secondary_tcp_server.h b/src/aktualizr_secondary/secondary_tcp_server.h new file mode 100644 index 0000000000..1a3db1490e --- /dev/null +++ b/src/aktualizr_secondary/secondary_tcp_server.h @@ -0,0 +1,57 @@ +#ifndef AKTUALIZR_SECONDARY_TCP_SERVER_H_ +#define AKTUALIZR_SECONDARY_TCP_SERVER_H_ + +#include +#include +#include + +#include "utilities/utils.h" + +class MsgHandler; + +/** + * Listens on a socket, decodes calls (ASN.1) and forwards them to an Uptane Secondary + * implementation + */ +class 
SecondaryTcpServer { + public: + enum class ExitReason { + kNotApplicable, + kRebootNeeded, + kUnkown, + }; + + SecondaryTcpServer(MsgHandler& msg_handler, const std::string& primary_ip, in_port_t primary_port, in_port_t port = 0, + bool reboot_after_install = false); + ~SecondaryTcpServer() = default; + SecondaryTcpServer(const SecondaryTcpServer&) = delete; + SecondaryTcpServer(SecondaryTcpServer&&) = delete; + SecondaryTcpServer& operator=(const SecondaryTcpServer&) = delete; + SecondaryTcpServer& operator=(const SecondaryTcpServer&&) = delete; + + /** + * Accept connections on the socket, decode requests and respond using the secondary implementation + */ + void run(); + void stop(); + + void wait_until_running(int timeout = 10); + + in_port_t port() const; + ExitReason exit_reason() const; + + private: + bool HandleOneConnection(int socket); + + MsgHandler& msg_handler_; + ListenSocket listen_socket_; + std::atomic keep_running_; + bool reboot_after_install_; + ExitReason exit_reason_{ExitReason::kNotApplicable}; + + bool is_running_; + std::mutex running_condition_mutex_; + std::condition_variable running_condition_; +}; + +#endif // AKTUALIZR_SECONDARY_TCP_SERVER_H_ diff --git a/src/aktualizr_secondary/socket_server.cc b/src/aktualizr_secondary/socket_server.cc deleted file mode 100644 index 5ff3cd1eb5..0000000000 --- a/src/aktualizr_secondary/socket_server.cc +++ /dev/null @@ -1,187 +0,0 @@ -#include "socket_server.h" - -#include - -#include "AKIpUptaneMes.h" -#include "asn1/asn1_message.h" -#include "logging/logging.h" -#include "socket_activation/socket_activation.h" -#include "utilities/dequeue_buffer.h" -#include "utilities/sockaddr_io.h" - -#include -#include -#include - -void SocketServer::Run() { - if (listen(*socket_, SOMAXCONN) < 0) { - throw std::system_error(errno, std::system_category(), "listen"); - } - LOG_INFO << "Listening on " << Utils::ipGetSockaddr(*socket_); - - while (true) { - int con_fd; - sockaddr_storage peer_sa{}; - socklen_t 
peer_sa_size = sizeof(sockaddr_storage); - - LOG_DEBUG << "Waiting for connection from client..."; - if ((con_fd = accept(*socket_, reinterpret_cast(&peer_sa), &peer_sa_size)) == -1) { - LOG_INFO << "Socket accept failed. aborting"; - break; - } - LOG_DEBUG << "Connected..."; - HandleOneConnection(con_fd); - LOG_DEBUG << "Client disconnected"; - } -} - -void SocketServer::HandleOneConnection(int socket) { - // Outside the message loop, because one recv() may have parts of 2 messages - // Note that one recv() call returning 2+ messages doesn't work at the - // moment. This shouldn't be a problem until we have messages that aren't - // strictly request/response - DequeueBuffer buffer; - - while (true) { // Keep reading until we get an error - // Read an incomming message - AKIpUptaneMes_t *m = nullptr; - asn_dec_rval_t res; - asn_codec_ctx_s context{}; - ssize_t received; - do { - received = recv(socket, buffer.Tail(), buffer.TailSpace(), 0); - LOG_TRACE << "Got " << received << " bytes " - << Utils::toBase64(std::string(buffer.Tail(), static_cast(received))); - buffer.HaveEnqueued(static_cast(received)); - res = ber_decode(&context, &asn_DEF_AKIpUptaneMes, reinterpret_cast(&m), buffer.Head(), buffer.Size()); - buffer.Consume(res.consumed); - } while (res.code == RC_WMORE && received > 0); - // Note that ber_decode allocates *m even on failure, so this must always be done - Asn1Message::Ptr msg = Asn1Message::FromRaw(&m); - - if (res.code != RC_OK) { - return; // Either an error or the client closed the socket - } - - // Figure out what to do with the message - Asn1Message::Ptr resp = Asn1Message::Empty(); - switch (msg->present()) { - case AKIpUptaneMes_PR_getInfoReq: { - Uptane::EcuSerial serial = impl_->getSerial(); - Uptane::HardwareIdentifier hw_id = impl_->getHwId(); - PublicKey pk = impl_->getPublicKey(); - resp->present(AKIpUptaneMes_PR_getInfoResp); - auto r = resp->getInfoResp(); - SetString(&r->ecuSerial, serial.ToString()); - SetString(&r->hwId, 
hw_id.ToString()); - r->keyType = static_cast(pk.Type()); - SetString(&r->key, pk.Value()); - } break; - case AKIpUptaneMes_PR_manifestReq: { - std::string manifest = Utils::jsonToStr(impl_->getManifest()); - resp->present(AKIpUptaneMes_PR_manifestResp); - auto r = resp->manifestResp(); - r->manifest.present = manifest_PR_json; - SetString(&r->manifest.choice.json, manifest); // NOLINT - } break; - case AKIpUptaneMes_PR_putMetaReq: { - auto md = msg->putMetaReq(); - Uptane::RawMetaPack meta_pack; - if (md->image.present == image_PR_json) { - meta_pack.image_root = ToString(md->image.choice.json.root); // NOLINT - meta_pack.image_targets = ToString(md->image.choice.json.targets); // NOLINT - meta_pack.image_snapshot = ToString(md->image.choice.json.snapshot); // NOLINT - meta_pack.image_timestamp = ToString(md->image.choice.json.timestamp); // NOLINT - } else { - LOG_WARNING << "Images metadata in unknown format:" << md->image.present; - } - - if (md->director.present == director_PR_json) { - meta_pack.director_root = ToString(md->director.choice.json.root); // NOLINT - meta_pack.director_targets = ToString(md->director.choice.json.targets); // NOLINT - } else { - LOG_WARNING << "Director metadata in unknown format:" << md->director.present; - } - bool ok; - try { - ok = impl_->putMetadata(meta_pack); - } catch (Uptane::SecurityException &e) { - LOG_WARNING << "Rejected metadata push because of security failure" << e.what(); - ok = false; - } - resp->present(AKIpUptaneMes_PR_putMetaResp); - auto r = resp->putMetaResp(); - r->result = ok ? AKInstallationResult_success : AKInstallationResult_failure; - } break; - case AKIpUptaneMes_PR_sendFirmwareReq: { - auto fw = msg->sendFirmwareReq(); - auto fw_data = std::make_shared(ToString(fw->firmware)); - auto fut = std::async(std::launch::async, &Uptane::SecondaryInterface::sendFirmware, impl_, fw_data); - resp->present(AKIpUptaneMes_PR_sendFirmwareResp); - auto r = resp->sendFirmwareResp(); - r->result = fut.get() ? 
AKInstallationResult_success : AKInstallationResult_failure; - } break; - default: - LOG_ERROR << "Unrecognised message type:" << msg->present(); - return; - } - - // Send the response - if (resp->present() != AKIpUptaneMes_PR_NOTHING) { - int optval = 0; - setsockopt(socket, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(int)); - asn_enc_rval_t encode_result = - der_encode(&asn_DEF_AKIpUptaneMes, &resp->msg_, Asn1SocketWriteCallback, reinterpret_cast(&socket)); - if (encode_result.encoded == -1) { - return; // write error - } - optval = 1; - setsockopt(socket, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(int)); - } else { - LOG_DEBUG << "Not sending a response to message " << msg->present(); - } - } // Go back round and read another message - - // Parse error => Shutdown the socket - // write error => Shutdown the socket - // Timeout on write => shutdown -} - -SocketHandle SocketFromSystemdOrPort(in_port_t port) { - if (socket_activation::listen_fds(0) >= 1) { - LOG_INFO << "Using socket activation for main service"; - return SocketHandle(new int(socket_activation::listen_fds_start)); - } - - LOG_INFO << "Received " << socket_activation::listen_fds(0) - << " sockets, not using socket activation for main service"; - - // manual socket creation - int socket_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (socket_fd < 0) { - throw std::runtime_error("socket creation failed"); - } - SocketHandle hdl(new int(socket_fd)); - sockaddr_in6 sa{}; - - memset(&sa, 0, sizeof(sa)); - sa.sin6_family = AF_INET6; - sa.sin6_port = htons(port); - sa.sin6_addr = IN6ADDR_ANY_INIT; - - int v6only = 0; - if (setsockopt(*hdl, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only)) < 0) { - throw std::system_error(errno, std::system_category(), "setsockopt(IPV6_V6ONLY)"); - } - - int reuseaddr = 1; - if (setsockopt(*hdl, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr)) < 0) { - throw std::system_error(errno, std::system_category(), "setsockopt(SO_REUSEADDR)"); - } - - if (bind(*hdl, 
reinterpret_cast(&sa), sizeof(sa)) < 0) { - throw std::system_error(errno, std::system_category(), "bind"); - } - - return hdl; -} diff --git a/src/aktualizr_secondary/socket_server.h b/src/aktualizr_secondary/socket_server.h deleted file mode 100644 index 961301bf66..0000000000 --- a/src/aktualizr_secondary/socket_server.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef AKTUALIZR_SECONDARY_SOCKET_SERVER_H_ -#define AKTUALIZR_SECONDARY_SOCKET_SERVER_H_ - -#include "uptane/secondaryinterface.h" -#include "utilities/utils.h" - -/** - * Listens on a socket, decodes calls and forwards them to an Uptane Secondary - * implementation - */ -class SocketServer { - public: - SocketServer(std::shared_ptr implementation, SocketHandle socket) - : impl_(std::move(implementation)), socket_(std::move(socket)) {} - - /** - * Accept connections on the socket, decode requests and respond using the - * wrapped secondary - */ - void Run(); - - void HandleOneConnection(int socket); - - private: - std::shared_ptr impl_; - SocketHandle socket_; -}; - -/** - * If we are running under systemd, return the first fd that was handed to us - * via socket-activation, otherwise create and return a server socket on the - * given port number - */ -SocketHandle SocketFromSystemdOrPort(in_port_t port); - -#endif // AKTUALIZR_SECONDARY_SOCKET_SERVER_H_ \ No newline at end of file diff --git a/src/aktualizr_secondary/update_agent.h b/src/aktualizr_secondary/update_agent.h new file mode 100644 index 0000000000..a46f73d56f --- /dev/null +++ b/src/aktualizr_secondary/update_agent.h @@ -0,0 +1,28 @@ +#ifndef AKTUALIZR_SECONDARY_UPDATE_AGENT_H +#define AKTUALIZR_SECONDARY_UPDATE_AGENT_H + +#include "crypto/crypto.h" +#include "uptane/tuf.h" + +class UpdateAgent { + public: + using Ptr = std::shared_ptr; + + virtual ~UpdateAgent() = default; + UpdateAgent(const UpdateAgent&) = delete; + UpdateAgent(UpdateAgent&&) = delete; + UpdateAgent& operator=(const UpdateAgent&) = delete; + UpdateAgent& operator=(UpdateAgent&&) 
= delete; + + virtual bool isTargetSupported(const Uptane::Target& target) const = 0; + virtual bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const = 0; + virtual data::InstallationResult install(const Uptane::Target& target) = 0; + + virtual void completeInstall() = 0; + virtual data::InstallationResult applyPendingInstall(const Uptane::Target& target) = 0; + + protected: + UpdateAgent() = default; +}; + +#endif // AKTUALIZR_SECONDARY_UPDATE_AGENT_H diff --git a/src/aktualizr_secondary/update_agent_file.cc b/src/aktualizr_secondary/update_agent_file.cc new file mode 100644 index 0000000000..2e3d8385ed --- /dev/null +++ b/src/aktualizr_secondary/update_agent_file.cc @@ -0,0 +1,146 @@ +#include "update_agent_file.h" + +#include + +#include +#include "crypto/crypto.h" +#include "logging/logging.h" +#include "uptane/manifest.h" + +// TODO(OTA-4939): Unify this with the check in +// SotaUptaneClient::getNewTargets() and make it more generic. +bool FileUpdateAgent::isTargetSupported(const Uptane::Target& target) const { return target.type() != "OSTREE"; } + +bool FileUpdateAgent::getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const { + if (boost::filesystem::exists(target_filepath_)) { + auto file_content = Utils::readFile(target_filepath_); + + installed_image_info.name = current_target_name_; + installed_image_info.len = file_content.size(); + installed_image_info.hash = Uptane::ManifestIssuer::generateVersionHashStr(file_content); + } else { + // mimic the Primary's fake package manager behavior + auto unknown_target = Uptane::Target::Unknown(); + installed_image_info.name = unknown_target.filename(); + installed_image_info.len = unknown_target.length(); + installed_image_info.hash = unknown_target.sha256Hash(); + } + + return true; +} + +data::InstallationResult FileUpdateAgent::install(const Uptane::Target& target) { + if (!boost::filesystem::exists(new_target_filepath_)) { + LOG_ERROR << "The target image has 
not been received"; + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "The target image has not been received"); + } + + auto received_target_image_size = boost::filesystem::file_size(new_target_filepath_); + if (received_target_image_size != target.length()) { + LOG_ERROR << "Received image size does not match the size specified in Target metadata: " + << received_target_image_size << " != " << target.length(); + boost::filesystem::remove(new_target_filepath_); + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "Received image size does not match the size specified in Target metadata: " + + std::to_string(received_target_image_size) + + " != " + std::to_string(target.length())); + } + + if (!target.MatchHash(new_target_hasher_->getHash())) { + LOG_ERROR << "The received image's hash does not match the hash specified in Target metadata: " + << new_target_hasher_->getHash() << " != " << getTargetHash(target).HashString(); + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "The received image's hash does not match the hash specified in Target metadata: " + + new_target_hasher_->getHash().HashString() + + " != " + getTargetHash(target).HashString()); + } + + boost::filesystem::rename(new_target_filepath_, target_filepath_); + + if (boost::filesystem::exists(new_target_filepath_)) { + return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, + "The target image has not been installed"); + } + + if (!boost::filesystem::exists(target_filepath_)) { + return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, + "The target image has not been installed"); + } + + current_target_name_ = target.filename(); + new_target_hasher_.reset(); + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); +} + +void FileUpdateAgent::completeInstall() {} + +data::InstallationResult FileUpdateAgent::applyPendingInstall(const Uptane::Target& target) { + 
(void)target; + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Applying pending updates is not supported by the file update agent"); +} + +data::InstallationResult FileUpdateAgent::receiveData(const Uptane::Target& target, const uint8_t* data, size_t size) { + std::ofstream target_file(new_target_filepath_.c_str(), + std::ofstream::out | std::ofstream::binary | std::ofstream::app); + + if (!target_file.good()) { + LOG_ERROR << "Failed to open a new target image file"; + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "Failed to open a new target image file"); + } + + auto current_new_image_size = target_file.tellp(); + if (-1 == current_new_image_size) { + LOG_ERROR << "Failed to obtain a size of the new target image that is being uploaded"; + target_file.close(); + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "Failed to obtain a size of the new target image that is being uploaded"); + } + + if (static_cast(current_new_image_size) >= target.length()) { + LOG_ERROR << "The size of the received image data exceeds the expected Target image size: " + << current_new_image_size << " != " << target.length(); + target_file.close(); + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "The size of the received image data exceeds the expected Target image size: " + + std::to_string(current_new_image_size) + + " != " + std::to_string(target.length())); + } + + if (current_new_image_size == 0) { + new_target_hasher_ = MultiPartHasher::create(getTargetHash(target).type()); + } + + target_file.write(reinterpret_cast(data), static_cast(size)); + auto written_data_size = target_file.tellp() - current_new_image_size; + + if (written_data_size < 0 || static_cast(written_data_size) != size) { + LOG_ERROR << "The size of data written is not equal to the received data size: " << written_data_size + << " != " << size; + target_file.close(); + return 
data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + "The size of data written is not equal to the received data size: " + + std::to_string(written_data_size) + " != " + std::to_string(size)); + } + + target_file.close(); + + auto total_size = current_new_image_size + written_data_size; + LOG_DEBUG << "Received and stored data of a new target image." + " Received in this request (bytes): " + << size << "; total received so far: " << total_size << "; expected total: " << target.length(); + if (static_cast(total_size) == target.length()) { + LOG_INFO << "Successfully received and stored new target image of " << total_size << " bytes."; + } + + new_target_hasher_->update(data, size); + + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); +} + +Hash FileUpdateAgent::getTargetHash(const Uptane::Target& target) { + // TODO(OTA-4831): check target.hashes() size. + return target.hashes()[0]; +} diff --git a/src/aktualizr_secondary/update_agent_file.h b/src/aktualizr_secondary/update_agent_file.h new file mode 100644 index 0000000000..5cded2f973 --- /dev/null +++ b/src/aktualizr_secondary/update_agent_file.h @@ -0,0 +1,31 @@ +#ifndef AKTUALIZR_SECONDARY_UPDATE_AGENT_FILE_H +#define AKTUALIZR_SECONDARY_UPDATE_AGENT_FILE_H + +#include "update_agent.h" + +class FileUpdateAgent : public UpdateAgent { + public: + FileUpdateAgent(boost::filesystem::path target_filepath, std::string target_name) + : target_filepath_{std::move(target_filepath)}, + new_target_filepath_{target_filepath_.string() + ".newtarget"}, + current_target_name_{std::move(target_name)} {} + + bool isTargetSupported(const Uptane::Target& target) const override; + bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const override; + + virtual data::InstallationResult receiveData(const Uptane::Target& target, const uint8_t* data, size_t size); + data::InstallationResult install(const Uptane::Target& target) override; + + void completeInstall() override; 
+ data::InstallationResult applyPendingInstall(const Uptane::Target& target) override; + + private: + static Hash getTargetHash(const Uptane::Target& target); + + const boost::filesystem::path target_filepath_; + const boost::filesystem::path new_target_filepath_; + std::string current_target_name_; + std::shared_ptr new_target_hasher_; +}; + +#endif // AKTUALIZR_SECONDARY_UPDATE_AGENT_FILE_H diff --git a/src/aktualizr_secondary/update_agent_ostree.cc b/src/aktualizr_secondary/update_agent_ostree.cc new file mode 100644 index 0000000000..3dbc654ba7 --- /dev/null +++ b/src/aktualizr_secondary/update_agent_ostree.cc @@ -0,0 +1,125 @@ +#include "update_agent_ostree.h" + +#include + +#include "logging/logging.h" +#include "package_manager/ostreemanager.h" + +// TODO: consider moving this and SecondaryProvider::getTreehubCredentials to +// encapsulate them in one shared place if possible. +static void extractCredentialsArchive(const std::string& archive, std::string* ca, std::string* cert, std::string* pkey, + std::string* treehub_server); + +// TODO(OTA-4939): Unify this with the check in +// SotaUptaneClient::getNewTargets() and make it more generic. 
+bool OstreeUpdateAgent::isTargetSupported(const Uptane::Target& target) const { return target.IsOstree(); } + +bool OstreeUpdateAgent::getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const { + bool result = false; + try { + installed_image_info.len = 0; + installed_image_info.hash = ostreePackMan_->getCurrentHash(); + + // TODO(OTA-4545): consider more elegant way of storing currently installed target name + // usage of the SQLStorage and OSTree implementations aimed for Primary is + quite an overhead for Secondary + auto currently_installed_target = ostreePackMan_->getCurrent(); + if (!currently_installed_target.IsValid()) { + // This is the policy on a target image name in case of OSTree + // The policy is followed and implied in meta-updater (garage-sign/push) and the backend + installed_image_info.name = targetname_prefix_ + "-" + installed_image_info.hash; + } else { + installed_image_info.name = currently_installed_target.filename(); + } + + result = true; + } catch (const std::exception& exc) { + LOG_ERROR << "Failed to get the currently installed revision: " << exc.what(); + } + return result; +} + +data::InstallationResult OstreeUpdateAgent::downloadTargetRev(const Uptane::Target& target, + const std::string& treehub_tls_creds) { + std::string treehub_server; + + try { + std::string ca; + std::string cert; + std::string pkey; + std::string server_url; + extractCredentialsArchive(treehub_tls_creds, &ca, &cert, &pkey, &server_url); + keyMngr_->loadKeys(&pkey, &cert, &ca); + boost::trim(server_url); + treehub_server = server_url; + } catch (std::runtime_error& exc) { + LOG_ERROR << exc.what(); + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, + std::string("Error loading Treehub credentials: ") + exc.what()); + } + + data::InstallationResult result; + const int max_tries = 3; + int tries = 0; + std::chrono::milliseconds wait(500); + + for (; tries < max_tries; tries++) { + result =
OstreeManager::pull(sysrootPath_, treehub_server, *keyMngr_, target); + if (result.success) { + break; + } else if (tries < max_tries - 1) { + std::this_thread::sleep_for(wait); + wait *= 2; + } + } + if (!result.success) { + LOG_ERROR << "Download unsuccessful after " << tries << " attempts."; + } + + switch (result.result_code.num_code) { + case data::ResultCode::Numeric::kOk: { + LOG_INFO << "The target commit has been successfully downloaded: " << target.sha256Hash(); + break; + } + case data::ResultCode::Numeric::kAlreadyProcessed: { + LOG_INFO << "The target commit is already present on the local OSTree repo: " << target.sha256Hash(); + break; + } + default: { + LOG_ERROR << "Failed to download the target commit: " << target.sha256Hash() << " ( " + << result.result_code.ToString() << " ): " << result.description; + } + } + + return result; +} + +data::InstallationResult OstreeUpdateAgent::install(const Uptane::Target& target) { + return ostreePackMan_->install(target); +} + +void OstreeUpdateAgent::completeInstall() { ostreePackMan_->completeInstall(); } + +data::InstallationResult OstreeUpdateAgent::applyPendingInstall(const Uptane::Target& target) { + return ostreePackMan_->finalizeInstall(target); +} + +void extractCredentialsArchive(const std::string& archive, std::string* ca, std::string* cert, std::string* pkey, + std::string* treehub_server) { + { + std::stringstream as(archive); + *ca = Utils::readFileFromArchive(as, "ca.pem"); + } + { + std::stringstream as(archive); + *cert = Utils::readFileFromArchive(as, "client.pem"); + } + { + std::stringstream as(archive); + *pkey = Utils::readFileFromArchive(as, "pkey.pem"); + } + { + std::stringstream as(archive); + *treehub_server = Utils::readFileFromArchive(as, "server.url", true); + } +} diff --git a/src/aktualizr_secondary/update_agent_ostree.h b/src/aktualizr_secondary/update_agent_ostree.h new file mode 100644 index 0000000000..92303cd15a --- /dev/null +++ 
b/src/aktualizr_secondary/update_agent_ostree.h @@ -0,0 +1,36 @@ +#ifndef AKTUALIZR_SECONDARY_UPDATE_AGENT_OSTREE_H +#define AKTUALIZR_SECONDARY_UPDATE_AGENT_OSTREE_H + +#include "update_agent.h" + +class OstreeManager; +class KeyManager; + +class OstreeUpdateAgent : public UpdateAgent { + public: + OstreeUpdateAgent(boost::filesystem::path sysroot_path, std::shared_ptr& key_mngr, + std::shared_ptr& ostree_pack_man, std::string targetname_prefix) + : sysrootPath_(std::move(sysroot_path)), + keyMngr_(key_mngr), + ostreePackMan_(ostree_pack_man), + targetname_prefix_(std::move(targetname_prefix)) {} + + bool isTargetSupported(const Uptane::Target& target) const override; + bool getInstalledImageInfo(Uptane::InstalledImageInfo& installed_image_info) const override; + + data::InstallationResult downloadTargetRev(const Uptane::Target& target, const std::string& treehub_tls_creds); + + data::InstallationResult install(const Uptane::Target& target) override; + + void completeInstall() override; + + data::InstallationResult applyPendingInstall(const Uptane::Target& target) override; + + private: + boost::filesystem::path sysrootPath_; + std::shared_ptr keyMngr_; + std::shared_ptr ostreePackMan_; + const ::std::string targetname_prefix_; +}; + +#endif // AKTUALIZR_SECONDARY_UPDATE_AGENT_OSTREE_H diff --git a/src/aktualizr_secondary/update_test.cc b/src/aktualizr_secondary/update_test.cc deleted file mode 100644 index 56b63b9b9e..0000000000 --- a/src/aktualizr_secondary/update_test.cc +++ /dev/null @@ -1,84 +0,0 @@ -#include - -#include "aktualizr_secondary.h" -#include "uptane/secondaryinterface.h" - -#include -#include - -#include "config/config.h" -#include "storage/invstorage.h" -#include "utilities/utils.h" -std::string sysroot; - -class ShortCircuitSecondary : public Uptane::SecondaryInterface { - public: - ShortCircuitSecondary(AktualizrSecondary& sec) : secondary(sec) {} - ~ShortCircuitSecondary() override = default; - - Uptane::EcuSerial getSerial() override { 
return secondary.getSerialResp(); } - Uptane::HardwareIdentifier getHwId() override { return secondary.getHwIdResp(); } - PublicKey getPublicKey() override { return secondary.getPublicKeyResp(); } - Json::Value getManifest() override { return secondary.getManifestResp(); } - bool putMetadata(const Uptane::RawMetaPack& meta_pack) override { return secondary.putMetadataResp(meta_pack); } - int32_t getRootVersion(bool director) override { return secondary.getRootVersionResp(director); } - bool putRoot(const std::string& root, bool director) override { return secondary.putRootResp(root, director); } - bool sendFirmware(const std::shared_ptr& data) override { return secondary.sendFirmwareResp(data); } - - private: - AktualizrSecondary& secondary; -}; - -TEST(aktualizr_secondary_protocol, DISABLED_manual_update) { - // secondary - TemporaryDirectory temp_dir_sec; - AktualizrSecondaryConfig config; - config.network.port = 0; - config.storage.type = StorageType::kSqlite; - config.pacman.sysroot = sysroot; - auto storage = INvStorage::newStorage(config.storage); - - AktualizrSecondary as(config, storage); - - // secondary interface - ShortCircuitSecondary sec_iface{as}; - - // storage - TemporaryDirectory temp_dir; - Utils::copyDir("tests/test_data/secondary_meta", temp_dir.Path()); - StorageConfig storage2_config; - storage2_config.path = temp_dir.Path(); - auto storage2 = INvStorage::newStorage(storage2_config); - - Uptane::RawMetaPack metadata; - EXPECT_TRUE(storage2->loadLatestRoot(&metadata.director_root, Uptane::RepositoryType::Director())); - EXPECT_TRUE( - storage2->loadNonRoot(&metadata.director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); - EXPECT_TRUE(storage2->loadLatestRoot(&metadata.image_root, Uptane::RepositoryType::Image())); - EXPECT_TRUE(storage2->loadNonRoot(&metadata.image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); - EXPECT_TRUE( - storage2->loadNonRoot(&metadata.image_timestamp, 
Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); - EXPECT_TRUE( - storage2->loadNonRoot(&metadata.image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); - - std::string firmware = Utils::readFile(temp_dir.Path() / "firmware.bin"); - - EXPECT_TRUE(sec_iface.putMetadata(metadata)); - EXPECT_TRUE(sec_iface.sendFirmware(std::make_shared(firmware))); - Json::Value manifest = sec_iface.getManifest(); - - EXPECT_EQ(manifest["signed"]["installed_image"]["fileinfo"]["hashes"]["sha256"], - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(firmware)))); -} - -#ifndef __NO_MAIN__ -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - if (argc != 2) { - std::cerr << "Error: " << argv[0] << " requires the path to an OSTree sysroot as an input argument.\n"; - return EXIT_FAILURE; - } - sysroot = argv[1]; - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/aktualizr_secondary/uptane_test.cc b/src/aktualizr_secondary/uptane_test.cc deleted file mode 100644 index 989e5a06ae..0000000000 --- a/src/aktualizr_secondary/uptane_test.cc +++ /dev/null @@ -1,77 +0,0 @@ -#include - -#include "aktualizr_secondary.h" -#include "primary/reportqueue.h" -#include "primary/sotauptaneclient.h" - -#include "config/config.h" -#include "httpfake.h" -#include "uptane_test_common.h" - -std::shared_ptr test_storage; -AktualizrSecondaryConfig test_config; -std::string test_sysroot; - -TEST(aktualizr_secondary_uptane, getSerial) { - test_config.pacman.sysroot = test_sysroot; - AktualizrSecondary as(test_config, test_storage); - - EXPECT_NE(as.getSerialResp(), Uptane::EcuSerial("hw")); -} - -TEST(aktualizr_secondary_uptane, getHwId) { - test_config.pacman.sysroot = test_sysroot; - AktualizrSecondary as(test_config, test_storage); - - EXPECT_NE(as.getHwIdResp(), Uptane::HardwareIdentifier("")); -} - -TEST(aktualizr_secondary_uptane, getPublicKey) { - test_config.pacman.sysroot = test_sysroot; - AktualizrSecondary 
as(test_config, test_storage); - - EXPECT_NO_THROW(as.getPublicKeyResp()); -} - -TEST(aktualizr_secondary_uptane, credentialsPassing) { - TemporaryDirectory temp_dir; - auto http = std::make_shared(temp_dir.Path()); - Config config; - config.storage.path = temp_dir.Path(); - boost::filesystem::copy_file("tests/test_data/cred.zip", (temp_dir / "cred.zip").string()); - config.provision.provision_path = temp_dir / "cred.zip"; - config.provision.mode = ProvisionMode::kSharedCred; - config.provision.primary_ecu_serial = "testecuserial"; - config.uptane.director_server = http->tls_server + "/director"; - config.uptane.repo_server = http->tls_server + "/repo"; - config.pacman.type = PackageManager::kNone; - - auto storage = INvStorage::newStorage(config.storage); - - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - EXPECT_NO_THROW(sota_client->initialize()); - - std::string arch = sota_client->secondaryTreehubCredentials(); - std::string ca, cert, pkey, server_url; - EXPECT_NO_THROW(AktualizrSecondary::extractCredentialsArchive(arch, &ca, &cert, &pkey, &server_url)); -} - -#ifndef __NO_MAIN__ -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - - TemporaryDirectory temp_dir; - test_config.network.port = 0; // random port - test_config.storage.path = temp_dir.Path(); - test_config.storage.type = StorageType::kSqlite; - - test_storage = INvStorage::newStorage(test_config.storage); - - if (argc != 2) { - std::cerr << "Error: " << argv[0] << " requires the path to an OSTree sysroot as an input argument.\n"; - return EXIT_FAILURE; - } - test_sysroot = argv[1]; - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/cert_provider/CMakeLists.txt b/src/cert_provider/CMakeLists.txt index 21f45513f6..a2a4627160 100644 --- a/src/cert_provider/CMakeLists.txt +++ b/src/cert_provider/CMakeLists.txt @@ -1,19 +1,21 @@ +set(CERT_PROVIDER_SRC main.cc) + # set the name of the executable -add_executable(aktualizr-cert-provider main.cc 
+add_executable(aktualizr-cert-provider ${CERT_PROVIDER_SRC} $ $ $ - $ $ + $ $ + $ $) -set_source_files_properties(main.cc PROPERTIES COMPILE_FLAGS -Wno-deprecated-declarations) - target_link_libraries(aktualizr-cert-provider - ${CMAKE_THREAD_LIBS_INIT} + Threads::Threads ${Boost_SYSTEM_LIBRARIES} ${Boost_LIBRARIES} + ${JSONCPP_LIBRARIES} ${LibArchive_LIBRARIES} ${LIBP11_LIBRARIES} ${CURL_LIBRARIES} @@ -21,11 +23,9 @@ target_link_libraries(aktualizr-cert-provider ${sodium_LIBRARY_RELEASE} ) -aktualizr_source_file_checks(main.cc) - add_dependencies(build_tests aktualizr-cert-provider) -install(TARGETS aktualizr-cert-provider RUNTIME DESTINATION bin COMPONENT garage_deploy) +install(TARGETS aktualizr-cert-provider RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT garage_deploy) add_aktualizr_test(NAME aktualizr_cert_provider SOURCES cert_provider_test.cc PROJECT_WORKING_DIRECTORY ARGS $) @@ -35,8 +35,16 @@ if (SOTA_PACKED_CREDENTIALS) set_tests_properties(test_aktualizr_cert_provider_shared_cred PROPERTIES LABELS "credentials") endif(SOTA_PACKED_CREDENTIALS) -aktualizr_source_file_checks(${AKTUALIZR_CERT_PROVIDER_SRC} - ${AKTUALIZR_CERT_HEADERS} +# Check the --help option works. +add_test(NAME aktualizr-cert-provider-option-help + COMMAND aktualizr-cert-provider --help) + +# Report version. 
+add_test(NAME aktualizr-cert-provider-option-version + COMMAND aktualizr-cert-provider --version) +set_tests_properties(aktualizr-cert-provider-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current aktualizr-cert-provider version is: ${AKTUALIZR_VERSION}") + +aktualizr_source_file_checks(${CERT_PROVIDER_SRC} cert_provider_shared_cred_test.cc cert_provider_test.cc cert_provider_test.h) diff --git a/src/cert_provider/cert_provider_shared_cred_test.cc b/src/cert_provider/cert_provider_shared_cred_test.cc index c1870df4ba..f689b45a65 100644 --- a/src/cert_provider/cert_provider_shared_cred_test.cc +++ b/src/cert_provider/cert_provider_shared_cred_test.cc @@ -1,9 +1,10 @@ #include +#include #include #include "cert_provider_test.h" -#include "config/config.h" +#include "libaktualizr/config.h" #include "utilities/utils.h" static boost::filesystem::path CERT_PROVIDER_PATH; @@ -12,16 +13,17 @@ static boost::filesystem::path CREDENTIALS_PATH; class AktualizrCertProviderTest : public ::testing::Test { protected: struct TestArgs { - TestArgs(const TemporaryDirectory& tmp_dir, const std::string& cred_path) - : test_dir{tmp_dir.PathString()}, credentials_path(cred_path) {} + TestArgs(const TemporaryDirectory& tmp_dir, const std::string& cred_path_in) + : test_dir{tmp_dir.PathString()}, credentials_path(tmp_dir.Path() / "credentials.zip") { + boost::filesystem::copy_file(cred_path_in, credentials_path); + } const std::string test_dir; const std::string fleet_ca_cert = "tests/test_data/CAcert.pem"; const std::string fleet_ca_private_key = "tests/test_data/CApkey.pem"; - const std::string credentials_path; + const boost::filesystem::path credentials_path; }; - protected: TemporaryDirectory tmp_dir_; TestArgs test_args_{tmp_dir_, CREDENTIALS_PATH.string()}; DeviceCredGenerator device_cred_gen_{CERT_PROVIDER_PATH.string()}; @@ -38,15 +40,9 @@ class AktualizrCertProviderTest : public ::testing::Test { * - [x] Provide server URL if requested */ TEST_F(AktualizrCertProviderTest, 
SharedCredProvisioning) { - if (test_args_.credentials_path.empty()) { - // GTEST_SKIP() was introduced in recent gtest version; - SUCCEED() << "A path to the credentials file hasn't been proided, so skip the test"; - return; - } - DeviceCredGenerator::ArgSet args; - args.credentialFile = test_args_.credentials_path; + args.credentialFile = test_args_.credentials_path.string(); args.localDir = test_args_.test_dir; args.provideRootCA.set(); args.provideServerURL.set(); diff --git a/src/cert_provider/cert_provider_test.cc b/src/cert_provider/cert_provider_test.cc index 720580fe2a..913ccecab4 100644 --- a/src/cert_provider/cert_provider_test.cc +++ b/src/cert_provider/cert_provider_test.cc @@ -1,9 +1,11 @@ #include +#include #include #include "cert_provider_test.h" -#include "config/config.h" +#include "crypto/crypto.h" +#include "libaktualizr/config.h" #include "utilities/utils.h" static boost::filesystem::path CERT_PROVIDER_PATH; @@ -199,8 +201,8 @@ TEST_F(AktualizrCertProviderTest, ConfigFilePathUsage) { Config config; config.import.base_path = base_path; - config.import.tls_pkey_path = BasedPath(private_key_file); - config.import.tls_clientcert_path = BasedPath(cert_file); + config.import.tls_pkey_path = utils::BasedPath(private_key_file); + config.import.tls_clientcert_path = utils::BasedPath(cert_file); auto test_conf_file = tmp_dir_ / "conf.toml"; boost::filesystem::ofstream conf_file(test_conf_file); diff --git a/src/cert_provider/cert_provider_test.h b/src/cert_provider/cert_provider_test.h index c0c6ddca3f..eee1bac917 100644 --- a/src/cert_provider/cert_provider_test.h +++ b/src/cert_provider/cert_provider_test.h @@ -88,7 +88,7 @@ class DeviceCredGenerator : public Process { privateKeyFileFullPath{(rootDir / directory / privateKeyFile)}, certFileFullPath{rootDir / directory / certFile}, serverRootCAFullPath{rootDir / directory / serverRootCA}, - gtwURLFileFullPath{rootDir / gtwURLFile} {} + gtwURLFileFullPath{rootDir / directory / gtwURLFile} {} const 
std::string directory; const std::string privateKeyFile; diff --git a/src/cert_provider/main.cc b/src/cert_provider/main.cc index 3033ec6c5a..e280645586 100644 --- a/src/cert_provider/main.cc +++ b/src/cert_provider/main.cc @@ -1,26 +1,24 @@ -#include +#include #include -#include #include #include #include #include -#include #include "json/json.h" #include "bootstrap/bootstrap.h" -#include "config/config.h" #include "crypto/crypto.h" #include "http/httpclient.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "utilities/aktualizr_version.h" #include "utilities/utils.h" namespace bpo = boost::program_options; -void check_info_options(const bpo::options_description& description, const bpo::variables_map& vm) { +void checkInfoOptions(const bpo::options_description& description, const bpo::variables_map& vm) { if (vm.count("help") != 0) { std::cout << description << '\n'; exit(EXIT_SUCCESS); @@ -31,7 +29,7 @@ void check_info_options(const bpo::options_description& description, const bpo:: } } -bpo::variables_map parse_options(int argc, char* argv[]) { +bpo::variables_map parseOptions(int argc, char** argv) { bpo::options_description description("aktualizr-cert-provider command line options"); // clang-format off description.add_options() @@ -45,12 +43,12 @@ bpo::variables_map parse_options(int argc, char* argv[]) { ("certificate-c", bpo::value(), "value for C field in certificate subject name") ("certificate-st", bpo::value(), "value for ST field in certificate subject name") ("certificate-o", bpo::value(), "value for O field in certificate subject name") - ("certificate-cn", bpo::value(), "value for CN field in certificate subject name") + ("certificate-cn", bpo::value(), "value for CN field in certificate subject name (used for device ID)") ("target,t", bpo::value(), "target device to scp credentials to (or [user@]host)") ("port,p", bpo::value(), "target port") - ("directory,d", bpo::value(), "directory on target to write credentials to 
(conflicts with -config)") - ("root-ca,r", "provide root CA") - ("server-url,u", "provide server url file") + ("directory,d", bpo::value(), "directory on target to write credentials to (conflicts with --config)") + ("root-ca,r", "provide root CA certificate") + ("server-url,u", "provide server URL file") ("local,l", bpo::value(), "local directory to write credentials to") ("config,g", bpo::value >()->composing(), "configuration file or directory from which to get file names") ("skip-checks,s", "skip strict host key checking for ssh/scp commands"); @@ -62,7 +60,7 @@ bpo::variables_map parse_options(int argc, char* argv[]) { bpo::basic_parsed_options parsed_options = bpo::command_line_parser(argc, argv).options(description).allow_unregistered().run(); bpo::store(parsed_options, vm); - check_info_options(description, vm); + checkInfoOptions(description, vm); bpo::notify(vm); unregistered_options = bpo::collect_unrecognized(parsed_options.options, bpo::include_positional); if (vm.count("help") == 0 && !unregistered_options.empty()) { @@ -74,7 +72,7 @@ bpo::variables_map parse_options(int argc, char* argv[]) { std::cout << ex.what() << std::endl << description; exit(EXIT_FAILURE); } catch (const bpo::error& ex) { - check_info_options(description, vm); + checkInfoOptions(description, vm); // print the error message to the standard output too, as the user provided // a non-supported commandline option @@ -88,188 +86,6 @@ bpo::variables_map parse_options(int argc, char* argv[]) { return vm; } -// I miss Rust's ? 
operator -#define SSL_ERROR(description) \ - { \ - std::cerr << (description) << ERR_error_string(ERR_get_error(), nullptr) << std::endl; \ - return false; \ - } -bool generate_and_sign(const std::string& cacert_path, const std::string& capkey_path, std::string* pkey, - std::string* cert, const bpo::variables_map& commandline_map) { - int rsa_bits = 2048; - if (commandline_map.count("bits") != 0) { - rsa_bits = (commandline_map["bits"].as()); - if (rsa_bits < 31) { // sic! - std::cerr << "RSA key size can't be smaller than 31 bits" << std::endl; - return false; - } - } - - int cert_days = 365; - if (commandline_map.count("days") != 0) { - cert_days = (commandline_map["days"].as()); - } - - std::string newcert_c; - if (commandline_map.count("certificate-c") != 0) { - newcert_c = (commandline_map["certificate-c"].as()); - if (newcert_c.length() != 2) { - std::cerr << "Country code (--certificate-c) should be 2 characters long" << std::endl; - return false; - } - }; - - std::string newcert_st; - if (commandline_map.count("certificate-st") != 0) { - newcert_st = (commandline_map["certificate-st"].as()); - if (newcert_st.empty()) { - std::cerr << "State name (--certificate-st) can't be empty" << std::endl; - return false; - } - }; - - std::string newcert_o; - if (commandline_map.count("certificate-o") != 0) { - newcert_o = (commandline_map["certificate-o"].as()); - if (newcert_o.empty()) { - std::cerr << "Organization name (--certificate-o) can't be empty" << std::endl; - return false; - } - }; - - std::string newcert_cn; - if (commandline_map.count("certificate-cn") != 0) { - newcert_cn = (commandline_map["certificate-cn"].as()); - if (newcert_cn.empty()) { - std::cerr << "Common name (--certificate-cn) can't be empty" << std::endl; - return false; - } - } else { - newcert_cn = Utils::genPrettyName(); - } - - // read CA certificate - std::string cacert_contents = Utils::readFile(cacert_path); - StructGuard bio_in_cacert(BIO_new_mem_buf(cacert_contents.c_str(), 
static_cast(cacert_contents.size())), - BIO_free_all); - StructGuard ca_certificate(PEM_read_bio_X509(bio_in_cacert.get(), nullptr, nullptr, nullptr), X509_free); - if (ca_certificate.get() == nullptr) { - std::cerr << "Reading CA certificate failed.\n"; - return false; - } - - // read CA private key - std::string capkey_contents = Utils::readFile(capkey_path); - StructGuard bio_in_capkey(BIO_new_mem_buf(capkey_contents.c_str(), static_cast(capkey_contents.size())), - BIO_free_all); - StructGuard ca_privkey(PEM_read_bio_PrivateKey(bio_in_capkey.get(), nullptr, nullptr, nullptr), - EVP_PKEY_free); - if (ca_privkey.get() == nullptr) SSL_ERROR("PEM_read_bio_PrivateKey failed: "); - - // create certificate - StructGuard certificate(X509_new(), X509_free); - if (certificate.get() == nullptr) SSL_ERROR("X509_new failed: "); - - X509_set_version(certificate.get(), 2); // X509v3 - - { - std::random_device urandom; - std::uniform_int_distribution<> serial_dist(0, (1UL << 20) - 1); - ASN1_INTEGER_set(X509_get_serialNumber(certificate.get()), serial_dist(urandom)); - } - - // create and set certificate subject name - StructGuard subj(X509_NAME_new(), X509_NAME_free); - if (subj.get() == nullptr) SSL_ERROR("X509_NAME_new failed: "); - - if (!newcert_c.empty()) { - if (X509_NAME_add_entry_by_txt(subj.get(), "C", MBSTRING_ASC, - reinterpret_cast(newcert_c.c_str()), -1, -1, 0) == 0) - SSL_ERROR("X509_NAME_add_entry_by_txt failed: "); - } - - if (!newcert_st.empty()) { - if (X509_NAME_add_entry_by_txt(subj.get(), "ST", MBSTRING_ASC, - reinterpret_cast(newcert_st.c_str()), -1, -1, 0) == 0) - SSL_ERROR("X509_NAME_add_entry_by_txt failed: "); - } - - if (!newcert_o.empty()) { - if (X509_NAME_add_entry_by_txt(subj.get(), "O", MBSTRING_ASC, - reinterpret_cast(newcert_o.c_str()), -1, -1, 0) == 0) - SSL_ERROR("X509_NAME_add_entry_by_txt failed: "); - } - - assert(!newcert_cn.empty()); - if (X509_NAME_add_entry_by_txt(subj.get(), "CN", MBSTRING_ASC, - reinterpret_cast(newcert_cn.c_str()), 
-1, -1, 0) == 0) - SSL_ERROR("X509_NAME_add_entry_by_txt failed: "); - - if (X509_set_subject_name(certificate.get(), subj.get()) == 0) SSL_ERROR("X509_set_subject_name failed: "); - - // set issuer name - X509_NAME* ca_subj = X509_get_subject_name(ca_certificate.get()); - if (ca_subj == nullptr) SSL_ERROR("X509_get_subject_name failed: "); - - if (X509_set_issuer_name(certificate.get(), ca_subj) == 0) SSL_ERROR("X509_set_issuer_name failed: "); - - // create and set key - - // freed by owner EVP_PKEY - RSA* certificate_rsa = RSA_generate_key(rsa_bits, RSA_F4, nullptr, nullptr); - if (certificate_rsa == nullptr) SSL_ERROR("RSA_generate_key failed: "); - - StructGuard certificate_pkey(EVP_PKEY_new(), EVP_PKEY_free); - if (certificate_pkey.get() == nullptr) SSL_ERROR("EVP_PKEY_new failed: "); - - if (!EVP_PKEY_assign_RSA(certificate_pkey.get(), certificate_rsa)) // NOLINT - SSL_ERROR("EVP_PKEY_assign_RSA failed: "); - - if (X509_set_pubkey(certificate.get(), certificate_pkey.get()) == 0) SSL_ERROR("X509_set_pubkey failed: "); - - // set validity period - if (X509_gmtime_adj(X509_get_notBefore(certificate.get()), 0) == nullptr) SSL_ERROR("X509_gmtime_adj failed: "); - - if (X509_gmtime_adj(X509_get_notAfter(certificate.get()), 60L * 60L * 24L * cert_days) == nullptr) - SSL_ERROR("X509_gmtime_adj failed: "); - - // sign - const EVP_MD* cert_digest = EVP_sha256(); - if (X509_sign(certificate.get(), ca_privkey.get(), cert_digest) == 0) SSL_ERROR("X509_sign failed: "); - - // serialize private key - char* privkey_buf; - StructGuard privkey_file(BIO_new(BIO_s_mem()), BIO_vfree); - if (privkey_file == nullptr) { - std::cerr << "Error opening memstream" << std::endl; - return false; - } - int ret = PEM_write_bio_RSAPrivateKey(privkey_file.get(), certificate_rsa, nullptr, nullptr, 0, nullptr, nullptr); - if (ret == 0) { - std::cerr << "PEM_write_RSAPrivateKey" << std::endl; - return false; - } - auto privkey_len = BIO_get_mem_data(privkey_file.get(), &privkey_buf); // NOLINT 
- *pkey = std::string(privkey_buf, static_cast(privkey_len)); - - // serialize certificate - char* cert_buf; - StructGuard cert_file(BIO_new(BIO_s_mem()), BIO_vfree); - if (cert_file == nullptr) { - std::cerr << "Error opening memstream" << std::endl; - return false; - } - ret = PEM_write_bio_X509(cert_file.get(), certificate.get()); - if (ret == 0) { - std::cerr << "PEM_write_X509" << std::endl; - return false; - } - auto cert_len = BIO_get_mem_data(cert_file.get(), &cert_buf); // NOLINT - *cert = std::string(cert_buf, static_cast(cert_len)); - - return true; -} - class SSHRunner { public: SSHRunner(std::string target, const bool skip_checks, const int port = 22) @@ -342,7 +158,7 @@ int main(int argc, char* argv[]) { logger_set_threshold(static_cast(2)); try { - bpo::variables_map commandline_map = parse_options(argc, argv); + bpo::variables_map commandline_map = parseOptions(argc, argv); std::string target; if (commandline_map.count("target") != 0) { @@ -376,7 +192,7 @@ int main(int argc, char* argv[]) { if (fleet_ca_path.empty() != fleet_ca_key_path.empty()) { std::cerr << "fleet-ca and fleet-ca-key options should be used together" << std::endl; - return 1; + return EXIT_FAILURE; } if (!commandline_map["directory"].empty() && !commandline_map["config"].empty()) { @@ -398,20 +214,32 @@ int main(int argc, char* argv[]) { if ((fleet_ca_path.empty() || provide_ca || provide_url) && credentials_path.empty()) { std::cerr << "Error: missing -c/--credentials parameters which is mandatory if the fleet CA is not specified or an " - "output of the root CA or a gateway URL is requested"; + "output of the root CA or a gateway URL is requested" + << std::endl; return EXIT_FAILURE; } else { serverUrl = Bootstrap::readServerUrl(credentials_path); } + std::string device_id; + if (commandline_map.count("certificate-cn") != 0) { + device_id = (commandline_map["certificate-cn"].as()); + if (device_id.empty()) { + std::cerr << "Common name (device ID, --certificate-cn) can't be 
empty" << std::endl; + return EXIT_FAILURE; + } + } else { + device_id = Utils::genPrettyName(); + std::cout << "Random device ID is " << device_id << "\n"; + } + boost::filesystem::path directory = "/var/sota/import"; - BasedPath pkey_file = BasedPath("pkey.pem"); - BasedPath cert_file = BasedPath("client.pem"); - BasedPath ca_file = BasedPath("root.crt"); - BasedPath url_file = BasedPath("gateway.url"); + utils::BasedPath pkey_file = utils::BasedPath("pkey.pem"); + utils::BasedPath cert_file = utils::BasedPath("client.pem"); + utils::BasedPath ca_file = utils::BasedPath("root.crt"); + utils::BasedPath url_file = utils::BasedPath("gateway.url"); if (!config_path.empty()) { Config config(config_path); - // TODO: provide path to root directory in `--local` parameter // try first import base path and then storage path if (!config.import.base_path.empty()) { @@ -438,7 +266,7 @@ int main(int argc, char* argv[]) { ca_file = config.storage.tls_cacert_path; } } - if (provide_url) { + if (provide_url && !config.tls.server_url_path.empty()) { url_file = config.tls.server_url_path; } } @@ -457,10 +285,6 @@ int main(int argc, char* argv[]) { std::string ca; if (fleet_ca_path.empty()) { // no fleet CA => provision with shared credentials - - std::string device_id = Utils::genPrettyName(); - std::cout << "Random device ID is " << device_id << "\n"; - Bootstrap boot(credentials_path, ""); HttpClient http; Json::Value data; @@ -489,10 +313,48 @@ int main(int argc, char* argv[]) { return EXIT_FAILURE; } } else { // fleet CA set => generate and sign a new certificate - if (!generate_and_sign(fleet_ca_path.native(), fleet_ca_key_path.native(), &pkey, &cert, commandline_map)) { - return EXIT_FAILURE; + int rsa_bits = 2048; + if (commandline_map.count("bits") != 0) { + rsa_bits = (commandline_map["bits"].as()); + } + + int cert_days = 365; + if (commandline_map.count("days") != 0) { + cert_days = (commandline_map["days"].as()); + } + + std::string newcert_c; + if 
(commandline_map.count("certificate-c") != 0) { + newcert_c = (commandline_map["certificate-c"].as()); + if (newcert_c.length() != 2) { + std::cerr << "Country code (--certificate-c) should be 2 characters long" << std::endl; + return EXIT_FAILURE; + } } + std::string newcert_st; + if (commandline_map.count("certificate-st") != 0) { + newcert_st = (commandline_map["certificate-st"].as()); + if (newcert_st.empty()) { + std::cerr << "State name (--certificate-st) can't be empty" << std::endl; + return EXIT_FAILURE; + } + } + + std::string newcert_o; + if (commandline_map.count("certificate-o") != 0) { + newcert_o = (commandline_map["certificate-o"].as()); + if (newcert_o.empty()) { + std::cerr << "Organization name (--certificate-o) can't be empty" << std::endl; + return EXIT_FAILURE; + } + } + + StructGuard certificate = + Crypto::generateCert(rsa_bits, cert_days, newcert_c, newcert_st, newcert_o, device_id); + Crypto::signCert(fleet_ca_path.native(), fleet_ca_key_path.native(), certificate.get()); + Crypto::serializeCert(&pkey, &cert, certificate.get()); + if (provide_ca) { // Read server root CA from server_ca.pem in archive if found (to support // community edition use case). 
Otherwise, default to the old version of @@ -532,7 +394,7 @@ int main(int argc, char* argv[]) { copyLocal(tmp_ca_file.PathString(), root_ca_file); } if (provide_url) { - auto gtw_url_file = local_dir / url_file.get(""); + auto gtw_url_file = local_dir / url_file.get(directory); std::cout << "Writing the gateway URL to " << gtw_url_file << " ...\n"; copyLocal(tmp_url_file.PathString(), gtw_url_file); } diff --git a/src/libaktualizr-c/CMakeLists.txt b/src/libaktualizr-c/CMakeLists.txt index 0982c587d4..7fcf721275 100644 --- a/src/libaktualizr-c/CMakeLists.txt +++ b/src/libaktualizr-c/CMakeLists.txt @@ -1,9 +1,25 @@ -SET(TARGET_NAME aktualizr-c) SET(SOURCES libaktualizr-c.cc) -add_library(${TARGET_NAME} SHARED ${SOURCES}) -target_include_directories(${TARGET_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/include) -target_link_libraries(${TARGET_NAME} PRIVATE aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) +add_library(aktualizr-c SHARED ${SOURCES} + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $ + $) + +target_include_directories(aktualizr-c PUBLIC ${PROJECT_SOURCE_DIR}/include) +target_link_libraries(aktualizr-c PRIVATE ${AKTUALIZR_EXTERNAL_LIBS}) aktualizr_source_file_checks(${SOURCES}) diff --git a/src/libaktualizr-c/libaktualizr-c.cc b/src/libaktualizr-c/libaktualizr-c.cc index c4918db0e3..cee397bcc1 100644 --- a/src/libaktualizr-c/libaktualizr-c.cc +++ b/src/libaktualizr-c/libaktualizr-c.cc @@ -1,10 +1,13 @@ #include "libaktualizr-c.h" +#include -Aktualizr *Aktualizr_create(const char *config_path) { +#include "libaktualizr/events.h" +#include "utilities/utils.h" + +Aktualizr *Aktualizr_create_from_cfg(Config *cfg) { Aktualizr *a; try { - Config cfg(config_path); - a = new Aktualizr(cfg); + a = new Aktualizr(*cfg); } catch (const std::exception &e) { std::cerr << "Aktualizr_create exception: " << e.what() << std::endl; return nullptr; @@ -12,6 +15,16 @@ Aktualizr *Aktualizr_create(const char *config_path) { return a; } +Aktualizr *Aktualizr_create_from_path(const 
char *config_path) { + try { + Config cfg(config_path); + return Aktualizr_create_from_cfg(&cfg); + } catch (const std::exception &e) { + std::cerr << "Aktualizr_create exception: " << e.what() << std::endl; + return nullptr; + } +} + int Aktualizr_initialize(Aktualizr *a) { try { a->Initialize(); @@ -34,7 +47,28 @@ int Aktualizr_uptane_cycle(Aktualizr *a) { void Aktualizr_destroy(Aktualizr *a) { delete a; } -Campaign *Aktualizr_campaign_check(Aktualizr *a) { +static void handler_wrapper(const std::shared_ptr &event, void (*handler)(const char *)) { + if (handler == nullptr) { + std::cerr << "handler_wrapper error: no external handler" << std::endl; + return; + } + + (*handler)(event->variant.c_str()); +} + +int Aktualizr_set_signal_handler(Aktualizr *a, void (*handler)(const char *event_name)) { + try { + auto functor = std::bind(handler_wrapper, std::placeholders::_1, handler); + a->SetSignalHandler(functor); + + } catch (const std::exception &e) { + std::cerr << "Aktualizr_set_signal_handler exception: " << e.what() << std::endl; + return -1; + } + return 0; +} + +Campaign *Aktualizr_campaigns_check(Aktualizr *a) { try { auto r = a->CampaignCheck().get(); if (!r.campaigns.empty()) { @@ -47,6 +81,7 @@ Campaign *Aktualizr_campaign_check(Aktualizr *a) { } return nullptr; } + int Aktualizr_campaign_accept(Aktualizr *a, Campaign *c) { try { a->CampaignControl(c->id, campaign::Cmd::Accept).get(); @@ -56,6 +91,7 @@ int Aktualizr_campaign_accept(Aktualizr *a, Campaign *c) { } return 0; } + int Aktualizr_campaign_postpone(Aktualizr *a, Campaign *c) { try { a->CampaignControl(c->id, campaign::Cmd::Postpone).get(); @@ -65,6 +101,7 @@ int Aktualizr_campaign_postpone(Aktualizr *a, Campaign *c) { } return 0; } + int Aktualizr_campaign_decline(Aktualizr *a, Campaign *c) { try { a->CampaignControl(c->id, campaign::Cmd::Decline).get(); @@ -74,4 +111,157 @@ int Aktualizr_campaign_decline(Aktualizr *a, Campaign *c) { } return 0; } + void Aktualizr_campaign_free(Campaign *c) { 
delete c; } + +Updates *Aktualizr_updates_check(Aktualizr *a) { + try { + auto r = a->CheckUpdates().get(); + return (!r.updates.empty()) ? new Updates(std::move(r.updates)) : nullptr; + } catch (const std::exception &e) { + std::cerr << "Campaign decline exception: " << e.what() << std::endl; + return nullptr; + } +} + +void Aktualizr_updates_free(Updates *u) { delete u; } + +size_t Aktualizr_get_targets_num(Updates *u) { return (u == nullptr) ? 0 : u->size(); } + +Target *Aktualizr_get_nth_target(Updates *u, size_t n) { + try { + if (u != nullptr) { + return &u->at(n); + } else { + return nullptr; + } + } catch (const std::exception &e) { + std::cerr << "Exception: " << e.what() << std::endl; + return nullptr; + } +} + +// TODO: Would it be nicer if t->filename returned const ref? +const char *Aktualizr_get_target_name(Target *t) { + if (t != nullptr) { + auto length = t->filename().length(); + auto *name = new char[length + 1]; + strncpy(name, t->filename().c_str(), length + 1); + return name; + } else { + return nullptr; + } +} + +void Aktualizr_free_target_name(const char *n) { delete[] n; } + +int Aktualizr_download_target(Aktualizr *a, Target *t) { + try { + a->Download(std::vector({*t})).get(); + } catch (const std::exception &e) { + std::cerr << "Campaign decline exception: " << e.what() << std::endl; + return -1; + } + return 0; +} + +int Aktualizr_install_target(Aktualizr *a, Target *t) { + try { + a->Install(std::vector({*t})).get(); + } catch (const std::exception &e) { + std::cerr << "Campaign decline exception: " << e.what() << std::endl; + return -1; + } + return 0; +} + +int Aktualizr_send_manifest(Aktualizr *a, const char *manifest) { + try { + Json::Value custom = Utils::parseJSON(manifest); + bool r = a->SendManifest(custom).get(); + return r ? 
0 : -1; + } catch (const std::exception &e) { + std::cerr << "Aktualizr_send_manifest exception: " << e.what() << std::endl; + return -1; + } +} + +int Aktualizr_send_device_data(Aktualizr *a) { + try { + a->SendDeviceData().get(); + return 0; + } catch (const std::exception &e) { + std::cerr << "Aktualizr_send_device_data exception: " << e.what() << std::endl; + return -1; + } +} + +StorageTargetHandle *Aktualizr_open_stored_target(Aktualizr *a, const Target *t) { + if (t == nullptr) { + std::cerr << "Aktualizr_open_stored_target failed: invalid input" << std::endl; + return nullptr; + } + + try { + auto *stream = new auto(a->OpenStoredTarget(*t)); + return stream; + } catch (const std::exception &e) { + std::cerr << "Aktualizr_open_stored_target exception: " << e.what() << std::endl; + return nullptr; + } +} + +size_t Aktualizr_read_stored_target(StorageTargetHandle *handle, uint8_t *buf, size_t size) { + if (handle != nullptr && buf != nullptr) { + handle->read(reinterpret_cast(buf), static_cast(size)); + return static_cast(handle->gcount()); + } else { + std::cerr << "Aktualizr_read_stored_target failed: invalid input " << (handle == nullptr ? 
"handle" : "buffer") + << std::endl; + return 0; + } +} + +int Aktualizr_close_stored_target(StorageTargetHandle *handle) { + if (handle != nullptr) { + handle->close(); + delete handle; + return 0; + } else { + std::cerr << "Aktualizr_close_stored_target failed: no input handle" << std::endl; + return -1; + } +} + +static Pause_Status_C get_Pause_Status_C(result::PauseStatus in) { + switch (in) { + case result::PauseStatus::kSuccess: { + return Pause_Status_C::kSuccess; + } + case result::PauseStatus::kAlreadyPaused: { + return Pause_Status_C::kAlreadyPaused; + } + case result::PauseStatus::kAlreadyRunning: { + return Pause_Status_C::kAlreadyRunning; + } + case result::PauseStatus::kError: { + return Pause_Status_C::kError; + } + default: { + assert(false); + return Pause_Status_C::kError; + } + } +} + +Pause_Status_C Aktualizr_pause(Aktualizr *a) { + result::Pause pause = a->Pause(); + return ::get_Pause_Status_C(pause.status); +} + +Pause_Status_C Aktualizr_resume(Aktualizr *a) { + result::Pause pause = a->Resume(); + return ::get_Pause_Status_C(pause.status); +} + +void Aktualizr_abort(Aktualizr *a) { a->Abort(); } diff --git a/src/libaktualizr-c/test/CMakeLists.txt b/src/libaktualizr-c/test/CMakeLists.txt index 352b38cfaf..23738dc98f 100644 --- a/src/libaktualizr-c/test/CMakeLists.txt +++ b/src/libaktualizr-c/test/CMakeLists.txt @@ -1,10 +1,23 @@ -SET(TARGET_NAME api-test) -SET(SOURCES api-test.c) +SET(REPO_PATH ${PROJECT_BINARY_DIR}/uptane_repos/c-api-test-repo) -SET(CMAKE_SKIP_RPATH TRUE) +if(CMAKE_CROSSCOMPILING) + find_program(UPTANE_GENERATOR NAMES uptane-generator) +else() + set(UPTANE_GENERATOR $) +endif() -add_executable(${TARGET_NAME} EXCLUDE_FROM_ALL ${SOURCES}) -add_dependencies(build_tests ${TARGET_NAME}) -target_link_libraries(${TARGET_NAME} aktualizr-c) +add_custom_target(api-test-resources-generation + COMMAND ${PROJECT_SOURCE_DIR}/tests/uptane_repo_generation/generate_repo.sh + ${UPTANE_GENERATOR} + ${REPO_PATH} + --add_campaigns) 
+add_dependencies(api-test-resources-generation uptane-generator) -aktualizr_source_file_checks(${SOURCES}) +list(REMOVE_ITEM TEST_LIBS aktualizr_lib testutilities) +add_aktualizr_test(NAME c_api SOURCES api-test.c PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_SOURCE_DIR}/tests/fake_http_server/fake_test_server.py ${REPO_PATH} + LIBRARIES api-test-utils aktualizr-c) +add_dependencies(t_c_api api-test-resources-generation) +aktualizr_source_file_checks(api-test.c) + +add_subdirectory(api-test-utils EXCLUDE_FROM_ALL) diff --git a/src/libaktualizr-c/test/api-test-utils/CMakeLists.txt b/src/libaktualizr-c/test/api-test-utils/CMakeLists.txt new file mode 100644 index 0000000000..72a0f6ef14 --- /dev/null +++ b/src/libaktualizr-c/test/api-test-utils/CMakeLists.txt @@ -0,0 +1,12 @@ +SET(TARGET_NAME api-test-utils) +SET(SOURCES api-test-utils.cc) +SET(HEADERS api-test-utils.h) + +SET(CMAKE_SKIP_RPATH TRUE) + +add_library(${TARGET_NAME} SHARED ${SOURCES}) +target_include_directories(${TARGET_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/tests) +target_link_libraries(${TARGET_NAME} testutilities) +add_dependencies(build_tests ${TARGET_NAME}) + +aktualizr_source_file_checks(${SOURCES} ${HEADERS}) diff --git a/src/libaktualizr-c/test/api-test-utils/api-test-utils.cc b/src/libaktualizr-c/test/api-test-utils/api-test-utils.cc new file mode 100644 index 0000000000..74fb039f06 --- /dev/null +++ b/src/libaktualizr-c/test/api-test-utils/api-test-utils.cc @@ -0,0 +1,44 @@ +#include "api-test-utils.h" + +#include +#include "libaktualizr/config.h" +#include "test_utils.h" +#include "utilities/utils.h" + +std::string serverAddress; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +std::unique_ptr server; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +std::unique_ptr temp_dir; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + +void Run_fake_http_server(const char *serverPath, const char *metaPath) { + std::string port = TestUtils::getFreePort(); + 
serverAddress = "http://127.0.0.1:" + port; + + // NOLINTNEXTLINE(clang-analyzer-core.NonNullParamChecker) + server = std_::make_unique(serverPath, port, "-f", "-m", metaPath); + TestUtils::waitForServer(serverAddress + "/"); +} + +// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) +void Stop_fake_http_server() { server.reset(); } + +Config *Get_test_config() { + auto *config = new Config(); + + config->tls.server = serverAddress; + + config->provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; + config->provision.primary_ecu_hardware_id = "primary_hw"; + config->provision.server = serverAddress; + config->provision.provision_path = "tests/test_data/cred.zip"; + config->provision.mode = ProvisionMode::kSharedCredReuse; + + temp_dir = std_::make_unique(); + config->storage.path = temp_dir->Path(); + config->pacman.type = PACKAGE_MANAGER_NONE; + config->pacman.images_path = temp_dir->Path() / "images"; + + config->postUpdateValues(); + return config; +} + +void Remove_test_config(Config *config) { delete config; } diff --git a/src/libaktualizr-c/test/api-test-utils/api-test-utils.h b/src/libaktualizr-c/test/api-test-utils/api-test-utils.h new file mode 100644 index 0000000000..d0c5782f3a --- /dev/null +++ b/src/libaktualizr-c/test/api-test-utils/api-test-utils.h @@ -0,0 +1,27 @@ +#ifndef API_TEST_UTILS_H +#define API_TEST_UTILS_H + +#ifdef __cplusplus +#include +#include "libaktualizr/config.h" + +using Config = Config; +using FakeHttpServer = boost::process::child; + +extern "C" { +#else +typedef struct Config Config; +typedef struct FakeHttpServer FakeHttpServer; +#endif + +void Run_fake_http_server(const char* serverPath, const char* metaPath); +void Stop_fake_http_server(); + +Config* Get_test_config(); +void Remove_test_config(Config* config); + +#ifdef __cplusplus +} +#endif + +#endif // API_TEST_UTILS_H diff --git a/src/libaktualizr-c/test/api-test.c b/src/libaktualizr-c/test/api-test.c index 8c10a5e573..16b5b37faf 100644 --- 
a/src/libaktualizr-c/test/api-test.c +++ b/src/libaktualizr-c/test/api-test.c @@ -1,44 +1,222 @@ #include #include +#include +#include "api-test-utils/api-test-utils.h" #include "libaktualizr-c.h" +#define CLEANUP_AND_RETURN_FAILED \ + Stop_fake_http_server(); \ + return EXIT_FAILURE; + +struct EventCounts { + int DownloadProgressReportCount; + int CampaignCheckCompleteCount; + int PutManifestCompleteCount; + int UpdateCheckCompleteCount; + int AllInstallsCompleteCount; + int OtherCount; +} counts; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + +static void signal_handler(const char *event_name) { + if (strcmp(event_name, "DownloadProgressReport") == 0) { + ++counts.DownloadProgressReportCount; + } else if (strcmp(event_name, "UpdateCheckComplete") == 0) { + ++counts.UpdateCheckCompleteCount; + } else if (strcmp(event_name, "AllInstallsComplete") == 0) { + ++counts.AllInstallsCompleteCount; + } else if (strcmp(event_name, "CampaignCheckComplete") == 0) { + ++counts.CampaignCheckCompleteCount; + } else if (strcmp(event_name, "PutManifestComplete") == 0) { + ++counts.PutManifestCompleteCount; + } else { + ++counts.OtherCount; + } +} + int main(int argc, char **argv) { Aktualizr *a; Campaign *c; + Updates *u; + Target *t; + Config *cfg; int err; - if (argc != 2) { - fprintf(stderr, "Missing config file\nUsage:\n\t%s CONFIG_FILE\n", argv[0]); + if (argc < 3) { + fprintf(stderr, "Incorrect input params\nUsage:\n\t%s FAKE_HTTP_SERVER_PATH META_DIR_PATH\n", argv[0]); return EXIT_FAILURE; } - a = Aktualizr_create(argv[1]); + const char *serverPath = argv[1]; + const char *metaPath = argv[2]; + Run_fake_http_server(serverPath, metaPath); + + cfg = Get_test_config(); + a = Aktualizr_create_from_cfg(cfg); if (!a) { - return EXIT_FAILURE; + printf("Aktualizr_create_from_cfg failed\n"); + CLEANUP_AND_RETURN_FAILED; } err = Aktualizr_initialize(a); if (err) { - return EXIT_FAILURE; + printf("Aktualizr_initialize failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + + 
counts.DownloadProgressReportCount = 0; + counts.CampaignCheckCompleteCount = 0; + counts.PutManifestCompleteCount = 0; + counts.UpdateCheckCompleteCount = 0; + counts.AllInstallsCompleteCount = 0; + counts.OtherCount = 0; + err = Aktualizr_set_signal_handler(a, &signal_handler); + if (err) { + printf("Aktualizr_set_signal_handler failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + + c = Aktualizr_campaigns_check(a); + if (c == NULL) { + printf("Aktualizr_campaigns_check returned NULL\n"); + CLEANUP_AND_RETURN_FAILED; + } + + printf("Accepting running campaign\n"); + err = Aktualizr_campaign_accept(a, c); + if (err) { + printf("Aktualizr_campaign_accept failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + Aktualizr_campaign_free(c); + + u = Aktualizr_updates_check(a); + if (u == NULL) { + printf("Aktualizr_updates_check returned NULL\n"); + CLEANUP_AND_RETURN_FAILED; + } + + Pause_Status_C status1 = Aktualizr_pause(a); + Pause_Status_C status2 = Aktualizr_pause(a); + if (status1 != kSuccess || status2 != kAlreadyPaused) { + printf("Aktualizr_pause failed, returned %i after first call, %i after second call\n", status1, status2); + CLEANUP_AND_RETURN_FAILED; + } + + Aktualizr_abort(a); + status1 = Aktualizr_pause(a); + if (status1 != kAlreadyPaused) { + printf("Aktualizr_pause failed, returned %i after Aktualizr_abort\n", status1); + CLEANUP_AND_RETURN_FAILED; + } + + status1 = Aktualizr_resume(a); + status2 = Aktualizr_resume(a); + if (status1 != kSuccess || status2 != kAlreadyRunning) { + printf("Aktualizr_resume failed, returned %i after first call, %i after second call\n", status1, status2); + CLEANUP_AND_RETURN_FAILED; + } + + size_t targets_num = Aktualizr_get_targets_num(u); + if (targets_num == 0) { + printf("Aktualizr_get_targets_num returned 0 targets\n"); + CLEANUP_AND_RETURN_FAILED; } - c = Aktualizr_campaign_check(a); - if (c) { - printf("Accepting running campaign\n"); - err = Aktualizr_campaign_accept(a, c); - Aktualizr_campaign_free(c); + printf("Found new 
updates for %zu target(s)\n", targets_num); + for (size_t i = 0; i < targets_num; i++) { + t = Aktualizr_get_nth_target(u, i); + const char *name = Aktualizr_get_target_name(t); + if (name == NULL) { + printf("Aktualizr_get_target_name returned NULL\n"); + CLEANUP_AND_RETURN_FAILED; + } + printf("Downloading target %s\n", name); + + err = Aktualizr_download_target(a, t); + if (err) { + printf("Aktualizr_download_target failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + + printf("Installing...\n"); + err = Aktualizr_install_target(a, t); + if (err) { + printf("Aktualizr_install_target failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + printf("Installation completed\n"); + + void *handle = Aktualizr_open_stored_target(a, t); + if (!handle) { + printf("Aktualizr_open_stored_target failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + + const size_t bufSize = 1024; + uint8_t *buf = malloc(bufSize); + size_t size = Aktualizr_read_stored_target(handle, buf, bufSize); + printf("Downloading target %s: extracted %li bytes (buffer size = %li), content:\n", name, (long int)size, + (long int)bufSize); + Aktualizr_free_target_name(name); + name = NULL; + + if (size == 0) { + printf("Aktualizr_read_stored_target read 0 bytes\n"); + CLEANUP_AND_RETURN_FAILED; + } + + for (size_t iBuf = 0; iBuf < size; ++iBuf) { + printf("%c", buf[iBuf]); + } + if (size == bufSize) { + printf(" ... 
(end of content skipped)"); + } + free(buf); + buf = NULL; + + err = Aktualizr_close_stored_target(handle); if (err) { - return EXIT_FAILURE; + printf("Aktualizr_close_stored_target failed\n"); + CLEANUP_AND_RETURN_FAILED; } } +#if 0 err = Aktualizr_uptane_cycle(a); if (err) { - return EXIT_FAILURE; + printf("Aktualizr_uptane_cycle failed\n"); + CLEANUP_AND_RETURN_FAILED; + } +#endif + + err = Aktualizr_send_manifest(a, "({\"test_field\":\"test_value\"})"); + if (err) { + printf("Aktualizr_send_manifest failed\n"); + CLEANUP_AND_RETURN_FAILED; } + err = Aktualizr_send_device_data(a); + if (err) { + printf("Aktualizr_send_device_data failed\n"); + CLEANUP_AND_RETURN_FAILED; + } + + Aktualizr_updates_free(u); Aktualizr_destroy(a); + Remove_test_config(cfg); + + if (counts.AllInstallsCompleteCount == 0 || counts.CampaignCheckCompleteCount == 0 || + counts.PutManifestCompleteCount == 0 || counts.UpdateCheckCompleteCount == 0 || + counts.DownloadProgressReportCount == 0 || counts.OtherCount == 0) { + printf( + "Aktualizr_set_signal_handler failed\nAllInstallsCompleteCount = %i\nCampaignCheckCompleteCount = " + "%i\nPutManifestCompleteCount = %i\nUpdateCheckCompleteCount = %i\nDownloadProgressReportCount = " + "%i\nOtherCount = %i\n", + counts.AllInstallsCompleteCount, counts.CampaignCheckCompleteCount, counts.PutManifestCompleteCount, + counts.UpdateCheckCompleteCount, counts.DownloadProgressReportCount, counts.OtherCount); + return EXIT_FAILURE; + } return EXIT_SUCCESS; } diff --git a/src/libaktualizr-posix/CMakeLists.txt b/src/libaktualizr-posix/CMakeLists.txt index fd7aa04ba2..31a1004a96 100644 --- a/src/libaktualizr-posix/CMakeLists.txt +++ b/src/libaktualizr-posix/CMakeLists.txt @@ -4,18 +4,9 @@ set(SOURCES ipuptanesecondary.cc) set(HEADERS ipuptanesecondary.h) -set(TARGET aktualizr-posix) - -add_library(${TARGET} STATIC - ${SOURCES} - $ - $ -) +add_library(aktualizr-posix STATIC ${SOURCES}) get_property(ASN1_INCLUDE_DIRS TARGET asn1_lib PROPERTY 
INCLUDE_DIRECTORIES) -target_include_directories(${TARGET} PUBLIC ${ASN1_INCLUDE_DIRS}) - -target_link_libraries(${TARGET} aktualizr_static_lib) - +target_include_directories(aktualizr-posix PUBLIC ${ASN1_INCLUDE_DIRS} ${CMAKE_CURRENT_SOURCE_DIR}) aktualizr_source_file_checks(${HEADERS} ${SOURCES}) diff --git a/src/libaktualizr-posix/asn1/CMakeLists.txt b/src/libaktualizr-posix/asn1/CMakeLists.txt index 2a29a2cbba..1e87466500 100644 --- a/src/libaktualizr-posix/asn1/CMakeLists.txt +++ b/src/libaktualizr-posix/asn1/CMakeLists.txt @@ -16,6 +16,6 @@ compile_asn1_lib(SOURCES messages/tlsconfig.asn1 ) -add_aktualizr_test(NAME asn1 SOURCES $ $ asn1_test.cc) +add_aktualizr_test(NAME asn1 SOURCES $ asn1_test.cc) aktualizr_source_file_checks(${SOURCES} ${HEADERS} asn1_test.cc) diff --git a/src/libaktualizr-posix/asn1/asn1-cer.cc b/src/libaktualizr-posix/asn1/asn1-cer.cc index 4189b763fb..2129e93118 100644 --- a/src/libaktualizr-posix/asn1/asn1-cer.cc +++ b/src/libaktualizr-posix/asn1/asn1-cer.cc @@ -45,6 +45,7 @@ std::string cer_encode_integer(int32_t number) { return res; } +// NOLINTNEXTLINE(misc-no-recursion) std::string cer_encode_string(const std::string& contents, ASN1_UniversalTag tag) { size_t len = contents.length(); @@ -102,6 +103,7 @@ static int32_t cer_decode_length(const std::string& content, int32_t* endpos) { return res; } +// NOLINTNEXTLINE(misc-no-recursion) uint8_t cer_decode_token(const std::string& ber, int32_t* endpos, int32_t* int_param, std::string* string_param) { *endpos = 0; if (ber.length() < 2) { diff --git a/src/libaktualizr-posix/asn1/asn1-cer.h b/src/libaktualizr-posix/asn1/asn1-cer.h index 4ae23f4bd1..c4fe07aa21 100644 --- a/src/libaktualizr-posix/asn1/asn1-cer.h +++ b/src/libaktualizr-posix/asn1/asn1-cer.h @@ -1,6 +1,7 @@ #ifndef ASN1_CER_H #define ASN1_CER_H +#include #include #include diff --git a/src/libaktualizr-posix/asn1/asn1-cerstream.h b/src/libaktualizr-posix/asn1/asn1-cerstream.h index 74eced1ab6..92d17be290 100644 --- 
a/src/libaktualizr-posix/asn1/asn1-cerstream.h +++ b/src/libaktualizr-posix/asn1/asn1-cerstream.h @@ -14,13 +14,16 @@ class Token { enum TokType { seq_tok, endseq_tok, restseq_tok, expl_tok, peekexpl_tok, endexpl_tok, opt_tok, endopt_tok }; explicit Token(TokType t) { type = t; } virtual ~Token() = default; + Token(const Token&) = default; + Token(Token&&) = default; + Token& operator=(const Token&) = default; + Token& operator=(Token&&) = default; TokType type; }; class EndoptToken : public Token { public: explicit EndoptToken(bool* result = nullptr) : Token(endopt_tok), result_p(result) {} - ~EndoptToken() override = default; bool* result_p; }; @@ -28,7 +31,6 @@ class ExplicitToken : public Token { public: explicit ExplicitToken(uint8_t token_tag, ASN1_Class token_tag_class = kAsn1Context) : Token(expl_tok), tag(token_tag), tag_class(token_tag_class) {} - ~ExplicitToken() override = default; uint8_t tag; ASN1_Class tag_class; }; @@ -37,7 +39,6 @@ class PeekExplicitToken : public Token { public: explicit PeekExplicitToken(uint8_t* token_tag = nullptr, ASN1_Class* token_tag_class = nullptr) : Token(peekexpl_tok), tag(token_tag), tag_class(token_tag_class) {} - ~PeekExplicitToken() override = default; uint8_t* tag; ASN1_Class* tag_class; }; diff --git a/src/libaktualizr-posix/asn1/asn1_message.cc b/src/libaktualizr-posix/asn1/asn1_message.cc index 855a85d9fb..44b4044ff5 100644 --- a/src/libaktualizr-posix/asn1/asn1_message.cc +++ b/src/libaktualizr-posix/asn1/asn1_message.cc @@ -1,21 +1,19 @@ +#include +#include +#include +#include + #include "asn1_message.h" #include "logging/logging.h" #include "utilities/dequeue_buffer.h" -#include "utilities/sockaddr_io.h" #include "utilities/utils.h" -#include -#include - #ifndef MSG_NOSIGNAL #define MSG_NOSIGNAL 0 #endif -#include -#include - int Asn1StringAppendCallback(const void* buffer, size_t size, void* priv) { - auto out_str = static_cast(priv); + auto* out_str = static_cast(priv); 
out_str->append(std::string(static_cast(buffer), size)); return 0; } @@ -25,11 +23,11 @@ int Asn1StringAppendCallback(const void* buffer, size_t size, void* priv) { * priv is a SocketHandle */ int Asn1SocketWriteCallback(const void* buffer, size_t size, void* priv) { - auto sock = reinterpret_cast(priv); // NOLINT + auto* sock = reinterpret_cast(priv); assert(sock != nullptr); assert(-1 < *sock); - auto b = static_cast(buffer); + const auto* b = static_cast(buffer); size_t len = size; size_t pos = 0; @@ -69,7 +67,12 @@ Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, int con_fd) { DequeueBuffer buffer; ssize_t received; do { + res.code = RC_FAIL; received = recv(con_fd, buffer.Tail(), buffer.TailSpace(), 0); + if (received < 0) { + LOG_ERROR << "Failed to read data from a connection socket: " << strerror(errno); + break; + } LOG_TRACE << "Asn1Rpc read " << Utils::toBase64(std::string(buffer.Tail(), static_cast(received))); buffer.HaveEnqueued(static_cast(received)); res = ber_decode(&context, &asn_DEF_AKIpUptaneMes, reinterpret_cast(&m), buffer.Head(), buffer.Size()); @@ -79,7 +82,7 @@ Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, int con_fd) { Asn1Message::Ptr msg = Asn1Message::FromRaw(&m); if (res.code != RC_OK) { - LOG_ERROR << "Asn1Rpc decoding failed"; + LOG_DEBUG << "Asn1Rpc decoding failed"; msg->present(AKIpUptaneMes_PR_NOTHING); } @@ -87,11 +90,12 @@ Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, int con_fd) { } Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, const std::pair& addr) { - Socket connection(addr.first, addr.second); + ConnectionSocket connection(addr.first, addr.second); if (connection.connect() < 0) { - LOG_ERROR << "Failed to connect to the secondary: " << std::strerror(errno); + LOG_ERROR << "Failed to connect to the Secondary ( " << addr.first << ":" << addr.second + << "): " << std::strerror(errno); return Asn1Message::Empty(); } - return Asn1Rpc(tx, connection.getFD()); + return Asn1Rpc(tx, *connection); } diff 
--git a/src/libaktualizr-posix/asn1/asn1_message.h b/src/libaktualizr-posix/asn1/asn1_message.h index b0cabf67a8..c0565763c3 100644 --- a/src/libaktualizr-posix/asn1/asn1_message.h +++ b/src/libaktualizr-posix/asn1/asn1_message.h @@ -37,8 +37,11 @@ class Asn1Message { template using SubPtr = Asn1Sub; + ~Asn1Message() { ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_AKIpUptaneMes, &msg_); } Asn1Message(const Asn1Message&) = delete; + Asn1Message(Asn1Message&&) = delete; Asn1Message operator=(const Asn1Message&) = delete; + Asn1Message operator=(Asn1Message&&) = delete; /** * Create a new Asn1Message, in order to fill it with data and send it @@ -52,7 +55,6 @@ class Asn1Message { */ static Asn1Message::Ptr FromRaw(AKIpUptaneMes_t** msg) { return new Asn1Message(msg); } - ~Asn1Message() { ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_AKIpUptaneMes, &msg_); } friend void intrusive_ptr_add_ref(Asn1Message* m) { m->ref_count_++; } friend void intrusive_ptr_release(Asn1Message* m) { if (--m->ref_count_ == 0) { @@ -61,7 +63,10 @@ class Asn1Message { } AKIpUptaneMes_PR present() const { return msg_.present; } - void present(AKIpUptaneMes_PR present) { msg_.present = present; } + Asn1Message& present(AKIpUptaneMes_PR present) { + msg_.present = present; + return *this; + } #define ASN1_MESSAGE_DEFINE_ACCESSOR(MessageType, FieldName) \ SubPtr FieldName() { \ @@ -76,6 +81,60 @@ class Asn1Message { ASN1_MESSAGE_DEFINE_ACCESSOR(AKPutMetaRespMes_t, putMetaResp); ASN1_MESSAGE_DEFINE_ACCESSOR(AKSendFirmwareReqMes_t, sendFirmwareReq); ASN1_MESSAGE_DEFINE_ACCESSOR(AKSendFirmwareRespMes_t, sendFirmwareResp); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKInstallReqMes_t, installReq); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKInstallRespMes_t, installResp); + + ASN1_MESSAGE_DEFINE_ACCESSOR(AKUploadDataReqMes_t, uploadDataReq); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKUploadDataRespMes_t, uploadDataResp); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKDownloadOstreeRevReqMes_t, downloadOstreeRevReq); + 
ASN1_MESSAGE_DEFINE_ACCESSOR(AKDownloadOstreeRevRespMes_t, downloadOstreeRevResp); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKPutMetaReq2Mes_t, putMetaReq2); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKPutMetaResp2Mes_t, putMetaResp2); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKInstallResp2Mes_t, installResp2); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKVersionReqMes_t, versionReq); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKVersionRespMes_t, versionResp); + + ASN1_MESSAGE_DEFINE_ACCESSOR(AKRootVerReqMes_t, rootVerReq); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKRootVerRespMes_t, rootVerResp); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKPutRootReqMes_t, putRootReq); + ASN1_MESSAGE_DEFINE_ACCESSOR(AKPutRootRespMes_t, putRootResp); + +#define ASN1_MESSAGE_DEFINE_STR_NAME(MessageID) \ + case MessageID: \ + return #MessageID; + + const char* toStr() const { + switch (present()) { + default: + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_NOTHING); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_getInfoReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_getInfoResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_manifestReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_manifestResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putMetaReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putMetaResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_sendFirmwareReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_sendFirmwareResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_installReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_installResp); + + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_uploadDataReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_uploadDataResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_downloadOstreeRevReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_downloadOstreeRevResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putMetaReq2); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putMetaResp2); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_installResp2); + 
ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_versionReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_versionResp); + + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_rootVerReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_rootVerResp); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putRootReq); + ASN1_MESSAGE_DEFINE_STR_NAME(AKIpUptaneMes_PR_putRootResp); + } + return "Unknown"; + }; /** * The underlying message structure. This is public to simplify calls to @@ -92,6 +151,7 @@ class Asn1Message { explicit Asn1Message(AKIpUptaneMes_t** msg) { if (msg != nullptr && *msg != nullptr) { memmove(&msg_, *msg, sizeof(AKIpUptaneMes_t)); + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc, hicpp-no-malloc) free(*msg); // Be careful. This needs to be the same free() used in der_decode *msg = nullptr; } @@ -122,4 +182,19 @@ void SetString(OCTET_STRING_t* dest, const std::string& str); */ Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, int con_fd); Asn1Message::Ptr Asn1Rpc(const Asn1Message::Ptr& tx, const std::pair& addr); + +/* + * Helper function for creating pointers to ASN.1 types. Note that the encoder + * will free these objects for you. 
+ */ +template +T* Asn1Allocation() { + // NOLINTNEXTLINE(cppcoreguidelines-no-malloc, hicpp-no-malloc) + auto ptr = static_cast(calloc(1, sizeof(T))); + if (!ptr) { + throw std::bad_alloc(); + } + return ptr; +} + #endif // ASN1_MESSAGE_H_ diff --git a/src/libaktualizr-posix/asn1/asn1_test.cc b/src/libaktualizr-posix/asn1/asn1_test.cc index b036add42b..3ed82bba66 100644 --- a/src/libaktualizr-posix/asn1/asn1_test.cc +++ b/src/libaktualizr-posix/asn1/asn1_test.cc @@ -7,10 +7,12 @@ #include #include +#include "libaktualizr/config.h" + #include "asn1-cerstream.h" #include "asn1_message.h" -#include "config/config.h" #include "der_encoder.h" +#include "utilities/utils.h" asn1::Serializer& operator<<(asn1::Serializer& ser, CryptoSource cs) { ser << asn1::implicit(static_cast(static_cast(cs))); diff --git a/src/libaktualizr-posix/asn1/messages/ipuptane_message.asn1 b/src/libaktualizr-posix/asn1/messages/ipuptane_message.asn1 index 39c8cb0fc1..6bf6b0902d 100644 --- a/src/libaktualizr-posix/asn1/messages/ipuptane_message.asn1 +++ b/src/libaktualizr-posix/asn1/messages/ipuptane_message.asn1 @@ -1,95 +1,236 @@ IpUptane DEFINITIONS ::= BEGIN - -- Keep these in Sync with Uptane::KeyType - AKIpUptaneKeyType ::= ENUMERATED { - ed25519(0), - rsa2048(1), - rsa4096(2), - unknownKey(255), - ... - } - - AKInstallationResult ::= ENUMERATED { - success, - failure, - ... - } - - - -- Json format image repository metadata - AKImageMetaJson ::= SEQUENCE { - root OCTET STRING, - timestamp OCTET STRING, - snapshot OCTET STRING, - targets OCTET STRING, - ... - } - - -- Json format director metadata - AKDirectorMetaJson ::= SEQUENCE { - root OCTET STRING, - targets OCTET STRING, - ... - } - - AKGetInfoReqMes ::= SEQUENCE { - ... - } - - AKGetInfoRespMes ::= SEQUENCE { - ecuSerial UTF8String, - hwId UTF8String, - keyType AKIpUptaneKeyType, - key OCTET STRING, - ... - } - - AKManifestReqMes ::= SEQUENCE { - ... 
- } - - AKManifestRespMes ::= SEQUENCE { - manifest CHOICE { - json OCTET STRING - }, - ... - } - - - AKPutMetaReqMes ::= SEQUENCE { - image CHOICE { - json AKImageMetaJson - }, - director CHOICE { - json AKDirectorMetaJson - }, - ... - } - - AKPutMetaRespMes ::= SEQUENCE { - result AKInstallationResult, - ... - } - - AKSendFirmwareReqMes ::= SEQUENCE { - firmware OCTET STRING, - ... - } - - AKSendFirmwareRespMes ::= SEQUENCE { - result AKInstallationResult, - ... - } - - AKIpUptaneMes ::= CHOICE { - getInfoReq [0] AKGetInfoReqMes, - getInfoResp [1] AKGetInfoRespMes, - manifestReq [2] AKManifestReqMes, - manifestResp [3] AKManifestRespMes, - putMetaReq [4] AKPutMetaReqMes, - putMetaResp [5] AKPutMetaRespMes, - sendFirmwareReq [6] AKSendFirmwareReqMes, - sendFirmwareResp [7] AKSendFirmwareRespMes, - ... - } + -- Keep these in sync with KeyType in types.h. + -- However, this is missing kRSA3072. + AKIpUptaneKeyType ::= ENUMERATED { + ed25519(0), + rsa2048(1), + rsa4096(2), + unknownKey(255), + ... + } + + -- Deprecated (v1). + AKInstallationResult ::= ENUMERATED { + success, + failure, + ... + } + + -- Keep these in sync with data::ResultCode::Numeric in types.h. + AKInstallationResultCode ::= ENUMERATED { + ok(0), + alreadyProcessed(1), + validationFailed(3), + installFailed(4), + downloadFailed(5), + internalError(18), + generalError(19), + needCompletion(21), + customError(22), + unknown(-1), + ... + } + + AKRepoType ::= ENUMERATED { + director, + image, + ... + } + + -- Json format Image repository metadata. Deprecated (v1). + AKImageMetaJson ::= SEQUENCE { + root OCTET STRING, + timestamp OCTET STRING, + snapshot OCTET STRING, + targets OCTET STRING, + ... + } + + -- Json format Director metadata. Deprecated (v1). + AKDirectorMetaJson ::= SEQUENCE { + root OCTET STRING, + targets OCTET STRING, + ... + } + + -- Json format generic single metadata object + AKMetaJson ::= SEQUENCE { + role OCTET STRING, + json OCTET STRING, + ... 
+ } + + -- Collection of metadata objects from one repo + AKMetaCollection ::= SEQUENCE OF AKMetaJson + + AKGetInfoReqMes ::= SEQUENCE { + ... + } + + AKGetInfoRespMes ::= SEQUENCE { + ecuSerial UTF8String, + hwId UTF8String, + keyType AKIpUptaneKeyType, + key OCTET STRING, + ... + } + + AKManifestReqMes ::= SEQUENCE { + ... + } + + AKManifestRespMes ::= SEQUENCE { + manifest CHOICE { + json OCTET STRING + }, + ... + } + + -- Deprecated (v1). + AKPutMetaReqMes ::= SEQUENCE { + image CHOICE { + json AKImageMetaJson + }, + director CHOICE { + json AKDirectorMetaJson + }, + ... + } + + -- Deprecated (v1). + AKPutMetaRespMes ::= SEQUENCE { + result AKInstallationResult, + ... + } + + AKPutMetaReq2Mes ::= SEQUENCE { + imageRepo CHOICE { + collection AKMetaCollection + }, + directorRepo CHOICE { + collection AKMetaCollection + }, + ... + } + + AKPutMetaResp2Mes ::= SEQUENCE { + result AKInstallationResultCode, + description OCTET STRING, + ... + } + + -- Deprecated (v1). + AKSendFirmwareReqMes ::= SEQUENCE { + firmware OCTET STRING, + ... + } + + -- Deprecated (v1). + AKSendFirmwareRespMes ::= SEQUENCE { + result AKInstallationResult, + ... + } + + -- Still used by v2. + AKInstallReqMes ::= SEQUENCE { + hash OCTET STRING, + ... + } + + -- Deprecated (v1). + AKInstallRespMes ::= SEQUENCE { + result AKInstallationResultCode, + ... + } + + AKInstallResp2Mes ::= SEQUENCE { + result AKInstallationResultCode, + description OCTET STRING, + ... + } + + AKUploadDataReqMes ::= SEQUENCE { + data OCTET STRING, + ... + } + + AKUploadDataRespMes ::= SEQUENCE { + result AKInstallationResultCode, + description OCTET STRING, + ... + } + + AKDownloadOstreeRevReqMes ::= SEQUENCE { + tlsCred OCTET STRING, + ... + } + + AKDownloadOstreeRevRespMes ::= SEQUENCE { + result AKInstallationResultCode, + description OCTET STRING, + ... + } + + AKVersionReqMes ::= SEQUENCE { + version INTEGER, + ... + } + + AKVersionRespMes ::= SEQUENCE { + version INTEGER, + ... 
+ } + + AKRootVerReqMes ::= SEQUENCE { + repotype AKRepoType, + ... + } + + AKRootVerRespMes ::= SEQUENCE { + version INTEGER, + ... + } + + AKPutRootReqMes ::= SEQUENCE { + repotype AKRepoType, + json OCTET STRING, + ... + } + + AKPutRootRespMes ::= SEQUENCE { + result AKInstallationResultCode, + description OCTET STRING, + ... + } + + + AKIpUptaneMes ::= CHOICE { + getInfoReq [0] AKGetInfoReqMes, + getInfoResp [1] AKGetInfoRespMes, + manifestReq [2] AKManifestReqMes, + manifestResp [3] AKManifestRespMes, + putMetaReq [4] AKPutMetaReqMes, + putMetaResp [5] AKPutMetaRespMes, + sendFirmwareReq [6] AKSendFirmwareReqMes, + sendFirmwareResp [7] AKSendFirmwareRespMes, + installReq [8] AKInstallReqMes, + installResp [9] AKInstallRespMes, + + uploadDataReq [10] AKUploadDataReqMes, + uploadDataResp [11] AKUploadDataRespMes, + downloadOstreeRevReq [12] AKDownloadOstreeRevReqMes, + downloadOstreeRevResp [13] AKDownloadOstreeRevRespMes, + putMetaReq2 [14] AKPutMetaReq2Mes, + putMetaResp2 [15] AKPutMetaResp2Mes, + installResp2 [16] AKInstallResp2Mes, + versionReq [17] AKVersionReqMes, + versionResp [18] AKVersionRespMes, + + rootVerReq [19] AKRootVerReqMes, + rootVerResp [20] AKRootVerRespMes, + putRootReq [21] AKPutRootReqMes, + putRootResp [22] AKPutRootRespMes, + ... 
+ } END diff --git a/src/libaktualizr-posix/ipuptanesecondary.cc b/src/libaktualizr-posix/ipuptanesecondary.cc index 82d5ca1fad..3174814b7d 100644 --- a/src/libaktualizr-posix/ipuptanesecondary.cc +++ b/src/libaktualizr-posix/ipuptanesecondary.cc @@ -1,43 +1,50 @@ +#include "ipuptanesecondary.h" + #include #include +#include +#include +#include + #include "asn1/asn1_message.h" #include "der_encoder.h" -#include "ipuptanesecondary.h" +#include "libaktualizr/secondary_provider.h" #include "logging/logging.h" - -#include +#include "uptane/tuf.h" +#include "utilities/utils.h" namespace Uptane { -std::pair> IpUptaneSecondary::connectAndCreate( - const std::string& address, unsigned short port) { +SecondaryInterface::Ptr IpUptaneSecondary::connectAndCreate(const std::string& address, unsigned short port, + VerificationType verification_type) { LOG_INFO << "Connecting to and getting info about IP Secondary: " << address << ":" << port << "..."; - Socket con_sock{address, port}; + ConnectionSocket con_sock{address, port}; - if (con_sock.connect() != 0) { - LOG_ERROR << "Failed to connect to a secondary: " << std::strerror(errno); - return {false, std::shared_ptr()}; + if (con_sock.connect() == 0) { + LOG_INFO << "Connected to IP Secondary: " + << "(" << address << ":" << port << ")"; + } else { + LOG_WARNING << "Failed to connect to a Secondary: " << std::strerror(errno); + return nullptr; } - LOG_INFO << "Connected to IP Secondary: " - << "(" << address << ":" << port << ")"; - - return create(address, port, con_sock.getFD()); + return create(address, port, verification_type, *con_sock); } -std::pair> IpUptaneSecondary::create(const std::string& address, - unsigned short port, - int con_fd) { +SecondaryInterface::Ptr IpUptaneSecondary::create(const std::string& address, unsigned short port, + VerificationType verification_type, int con_fd) { Asn1Message::Ptr req(Asn1Message::Empty()); req->present(AKIpUptaneMes_PR_getInfoReq); + auto m = req->getInfoReq(); auto resp = 
Asn1Rpc(req, con_fd); if (resp->present() != AKIpUptaneMes_PR_getInfoResp) { - LOG_ERROR << "Failed to get info response message from secondary"; - throw std::runtime_error("Failed to obtain information about a secondary: " + address + std::to_string(port)); + LOG_ERROR << "IP Secondary failed to respond to information request at " << address << ":" << port; + return std::make_shared(address, port, verification_type, EcuSerial::Unknown(), + HardwareIdentifier::Unknown(), PublicKey("", KeyType::kUnknown)); } auto r = resp->getInfoResp(); @@ -47,72 +54,287 @@ std::pair> IpUptaneSecondary:: auto type = static_cast(r->keyType); PublicKey pub_key = PublicKey(key, type); - LOG_INFO << "Got info on IP Secondary: " - << "hw-ID: " << hw_id << " serial: " << serial; + LOG_INFO << "Got ECU information from IP Secondary: " + << "hardware ID: " << hw_id << " serial: " << serial; + + return std::make_shared(address, port, verification_type, serial, hw_id, pub_key); +} + +SecondaryInterface::Ptr IpUptaneSecondary::connectAndCheck(const std::string& address, unsigned short port, + VerificationType verification_type, EcuSerial serial, + HardwareIdentifier hw_id, PublicKey pub_key) { + // try to connect: + // - if it succeeds compare with what we expect + // - otherwise, keep using what we know + try { + auto sec = IpUptaneSecondary::connectAndCreate(address, port, verification_type); + if (sec != nullptr) { + auto s = sec->getSerial(); + if (s != serial && serial != EcuSerial::Unknown()) { + LOG_WARNING << "Expected IP Secondary at " << address << ":" << port << " with serial " << serial + << " but found " << s; + } + auto h = sec->getHwId(); + if (h != hw_id && hw_id != HardwareIdentifier::Unknown()) { + LOG_WARNING << "Expected IP Secondary at " << address << ":" << port << " with hardware ID " << hw_id + << " but found " << h; + } + auto p = sec->getPublicKey(); + if (p.Type() == KeyType::kUnknown) { + LOG_ERROR << "IP Secondary at " << address << ":" << port << " has an unknown 
key type!"; + return nullptr; + } else if (p != pub_key && pub_key.Type() != KeyType::kUnknown) { + LOG_WARNING << "Expected IP Secondary at " << address << ":" << port << " with public key:\n" + << pub_key.Value() << "... but found:\n" + << p.Value(); + } + return sec; + } + } catch (std::exception& e) { + LOG_WARNING << "Could not connect to IP Secondary at " << address << ":" << port << " with serial " << serial; + } - return {true, std::make_shared(address, port, serial, hw_id, pub_key)}; + return std::make_shared(address, port, verification_type, std::move(serial), std::move(hw_id), + std::move(pub_key)); } -IpUptaneSecondary::IpUptaneSecondary(const std::string& address, unsigned short port, EcuSerial serial, - HardwareIdentifier hw_id, PublicKey pub_key) - : addr_{address, port}, serial_{std::move(serial)}, hw_id_{std::move(hw_id)}, pub_key_{std::move(pub_key)} {} +IpUptaneSecondary::IpUptaneSecondary(const std::string& address, unsigned short port, + VerificationType verification_type, EcuSerial serial, HardwareIdentifier hw_id, + PublicKey pub_key) + : addr_{address, port}, + verification_type_{verification_type}, + serial_{std::move(serial)}, + hw_id_{std::move(hw_id)}, + pub_key_{std::move(pub_key)} {} -bool IpUptaneSecondary::putMetadata(const RawMetaPack& meta_pack) { - LOG_INFO << "Sending Uptane metadata to the secondary"; +/* Determine the best protocol version to use for this Secondary. This did not + * exist for v1 and thus only works for v2 and beyond. It would be great if we + * could just do this once, but we do not have a simple way to do that, + * especially because of Secondaries that need to reboot to complete + * installation. 
*/ +void IpUptaneSecondary::getSecondaryVersion() const { + LOG_DEBUG << "Negotiating the protocol version with Secondary " << getSerial(); + const uint32_t latest_version = 2; Asn1Message::Ptr req(Asn1Message::Empty()); - req->present(AKIpUptaneMes_PR_putMetaReq); + req->present(AKIpUptaneMes_PR_versionReq); + auto m = req->versionReq(); + m->version = latest_version; + auto resp = Asn1Rpc(req, getAddr()); + + if (resp->present() != AKIpUptaneMes_PR_versionResp) { + // Bad response probably means v1, but make sure the Secondary is actually + // responsive before assuming that. + if (ping()) { + LOG_DEBUG << "Secondary " << getSerial() << " failed to respond to a version request; assuming version 1."; + protocol_version = 1; + } else { + LOG_INFO << "Secondary " << getSerial() + << " failed to respond to a version request; unable to determine protocol version."; + } + return; + } + + auto r = resp->versionResp(); + const auto secondary_version = static_cast(r->version); + if (secondary_version <= latest_version) { + LOG_DEBUG << "Using protocol version " << secondary_version << " for Secondary " << getSerial(); + protocol_version = secondary_version; + } else { + LOG_ERROR << "Secondary protocol version is " << secondary_version << " but Primary only supports up to " + << latest_version << "! 
Communication will most likely fail!"; + protocol_version = latest_version; + } +} + +data::InstallationResult IpUptaneSecondary::putMetadata(const Target& target) { + Uptane::MetaBundle meta_bundle; + bool load_result; + if (verification_type_ == VerificationType::kTuf) { + load_result = secondary_provider_->getImageRepoMetadata(&meta_bundle, target); + } else { + load_result = secondary_provider_->getMetadata(&meta_bundle, target); + } + if (!load_result) { + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unable to load stored metadata from Primary"); + } + + getSecondaryVersion(); + LOG_INFO << "Sending Uptane metadata to the Secondary"; + data::InstallationResult put_result; + if (protocol_version == 2) { + put_result = putMetadata_v2(meta_bundle); + } else if (protocol_version == 1) { + put_result = putMetadata_v1(meta_bundle); + } else { + LOG_ERROR << "Unexpected protocol version: " << protocol_version; + put_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unexpected protocol version: " + std::to_string(protocol_version)); + } + return put_result; +} + +data::InstallationResult IpUptaneSecondary::putMetadata_v1(const Uptane::MetaBundle& meta_bundle) { + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_putMetaReq); auto m = req->putMetaReq(); - m->image.present = image_PR_json; - SetString(&m->image.choice.json.root, meta_pack.image_root); // NOLINT - SetString(&m->image.choice.json.targets, meta_pack.image_targets); // NOLINT - SetString(&m->image.choice.json.snapshot, meta_pack.image_snapshot); // NOLINT - SetString(&m->image.choice.json.timestamp, meta_pack.image_timestamp); // NOLINT m->director.present = director_PR_json; - SetString(&m->director.choice.json.root, meta_pack.director_root); // NOLINT - SetString(&m->director.choice.json.targets, meta_pack.director_targets); // NOLINT + // Technically no Secondary supported TUF verification with the v1 protocol, + // 
so this probably wouldn't work. + if (verification_type_ != VerificationType::kTuf) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->director.choice.json.root, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Root())); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->director.choice.json.targets, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); + } + + m->image.present = image_PR_json; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->image.choice.json.root, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Root())); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->image.choice.json.timestamp, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->image.choice.json.snapshot, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + SetString(&m->image.choice.json.targets, + getMetaFromBundle(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); auto resp = Asn1Rpc(req, getAddr()); if (resp->present() != AKIpUptaneMes_PR_putMetaResp) { - LOG_ERROR << "Failed to get response to sending manifest to secondary"; - return false; + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to receive metadata."; + return data::InstallationResult( + data::ResultCode::Numeric::kInternalError, + "Secondary " + getSerial().ToString() + " failed to respond to a request to receive metadata."); } auto r = resp->putMetaResp(); - return r->result == AKInstallationResult_success; + if (r->result == AKInstallationResult_success) { + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } else { + 
return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, + "Error sending metadata to Secondary"); + } +} + +void IpUptaneSecondary::addMetadata(const Uptane::MetaBundle& meta_bundle, const Uptane::RepositoryType repo, + const Uptane::Role& role, AKMetaCollection_t& collection) { + auto* meta_json = Asn1Allocation(); + SetString(&meta_json->role, role.ToString()); + SetString(&meta_json->json, getMetaFromBundle(meta_bundle, repo, role)); + ASN_SEQUENCE_ADD(&collection, meta_json); } -bool IpUptaneSecondary::sendFirmware(const std::shared_ptr& data) { - std::lock_guard l(install_mutex); - LOG_INFO << "Sending firmware to the secondary"; +data::InstallationResult IpUptaneSecondary::putMetadata_v2(const Uptane::MetaBundle& meta_bundle) { Asn1Message::Ptr req(Asn1Message::Empty()); - req->present(AKIpUptaneMes_PR_sendFirmwareReq); + req->present(AKIpUptaneMes_PR_putMetaReq2); + auto m = req->putMetaReq2(); + + m->directorRepo.present = directorRepo_PR_collection; + if (verification_type_ != VerificationType::kTuf) { + addMetadata(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Root(), + m->directorRepo.choice.collection); // NOLINT(cppcoreguidelines-pro-type-union-access) + addMetadata(meta_bundle, Uptane::RepositoryType::Director(), Uptane::Role::Targets(), + m->directorRepo.choice.collection); // NOLINT(cppcoreguidelines-pro-type-union-access) + } + + m->imageRepo.present = imageRepo_PR_collection; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + addMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Root(), m->imageRepo.choice.collection); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + addMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp(), m->imageRepo.choice.collection); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + addMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot(), m->imageRepo.choice.collection); + // 
NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) + addMetadata(meta_bundle, Uptane::RepositoryType::Image(), Uptane::Role::Targets(), m->imageRepo.choice.collection); - auto m = req->sendFirmwareReq(); - SetString(&m->firmware, *data); auto resp = Asn1Rpc(req, getAddr()); - if (resp->present() != AKIpUptaneMes_PR_sendFirmwareResp) { - LOG_ERROR << "Failed to get response to sending firmware to secondary"; - return false; + if (resp->present() != AKIpUptaneMes_PR_putMetaResp2) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to receive metadata."; + return data::InstallationResult( + data::ResultCode::Numeric::kInternalError, + "Secondary " + getSerial().ToString() + " failed to respond to a request to receive metadata."); } - auto r = resp->sendFirmwareResp(); - return r->result == AKInstallationResult_success; + auto r = resp->putMetaResp2(); + return data::InstallationResult(static_cast(r->result), ToString(r->description)); } -Json::Value IpUptaneSecondary::getManifest() { - LOG_INFO << "Getting the manifest key of a secondary"; +int32_t IpUptaneSecondary::getRootVersion(bool director) const { + if (director && verification_type_ == VerificationType::kTuf) { + return 0; + } Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_rootVerReq); + auto m = req->rootVerReq(); - req->present(AKIpUptaneMes_PR_manifestReq); + if (director) { + m->repotype = AKRepoType_director; + } else { + m->repotype = AKRepoType_image; + } + + auto resp = Asn1Rpc(req, getAddr()); + if (resp->present() != AKIpUptaneMes_PR_rootVerResp) { + // v1 (and v2 until this was added) Secondaries won't understand this. + // Return 0 to indicate that this is unsupported. Sending intermediate Roots + // will be skipped, which could be fatal. There isn't a good way to + // distinguish this from real errors in the message protocol. 
+ LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a Root version request."; + return 0; + } + auto r = resp->rootVerResp(); + return static_cast(r->version); +} + +data::InstallationResult IpUptaneSecondary::putRoot(const std::string& root, bool director) { + if (director && verification_type_ == VerificationType::kTuf) { + return data::InstallationResult(data::ResultCode::Numeric::kOk, + "Secondary " + getSerial().ToString() + + " uses TUF verification and thus does not require Director Root metadata."); + } + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_putRootReq); + auto m = req->putRootReq(); + + if (director) { + m->repotype = AKRepoType_director; + } else { + m->repotype = AKRepoType_image; + } + SetString(&m->json, root); + auto resp = Asn1Rpc(req, getAddr()); + if (resp->present() != AKIpUptaneMes_PR_putRootResp) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to receive Root metadata."; + return data::InstallationResult( + data::ResultCode::Numeric::kInternalError, + "Secondary " + getSerial().ToString() + " failed to respond to a request to receive Root metadata."); + } + + auto r = resp->putRootResp(); + return data::InstallationResult(static_cast(r->result), ToString(r->description)); +} + +Manifest IpUptaneSecondary::getManifest() const { + getSecondaryVersion(); + + LOG_DEBUG << "Getting the manifest from Secondary with serial " << getSerial(); + Asn1Message::Ptr req(Asn1Message::Empty()); + + req->present(AKIpUptaneMes_PR_manifestReq); auto resp = Asn1Rpc(req, getAddr()); if (resp->present() != AKIpUptaneMes_PR_manifestResp) { - LOG_ERROR << "Failed to get public key response message from secondary"; + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a manifest request."; return Json::Value(); } auto r = resp->manifestResp(); @@ -121,7 +343,222 @@ Json::Value IpUptaneSecondary::getManifest() { LOG_ERROR << "Manifest wasn't in json format"; return 
Json::Value(); } - std::string manifest = ToString(r->manifest.choice.json); // NOLINT + std::string manifest = ToString(r->manifest.choice.json); // NOLINT(cppcoreguidelines-pro-type-union-access) return Utils::parseJSON(manifest); } + +bool IpUptaneSecondary::ping() const { + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_getInfoReq); + + auto m = req->getInfoReq(); + + auto resp = Asn1Rpc(req, getAddr()); + + return resp->present() == AKIpUptaneMes_PR_getInfoResp; +} + +data::InstallationResult IpUptaneSecondary::sendFirmware(const Uptane::Target& target) { + data::InstallationResult send_result; + if (protocol_version == 2) { + send_result = sendFirmware_v2(target); + } else if (protocol_version == 1) { + send_result = sendFirmware_v1(target); + } else { + LOG_ERROR << "Unexpected protocol version: " << protocol_version; + send_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unexpected protocol version: " + std::to_string(protocol_version)); + } + return send_result; +} + +data::InstallationResult IpUptaneSecondary::sendFirmware_v1(const Uptane::Target& target) { + std::string data_to_send; + + if (target.IsOstree()) { + // empty firmware means OSTree Secondaries: pack credentials instead + data_to_send = secondary_provider_->getTreehubCredentials(); + } else { + std::stringstream sstr; + auto str = secondary_provider_->getTargetFileHandle(target); + sstr << str.rdbuf(); + data_to_send = sstr.str(); + } + + LOG_INFO << "Sending firmware to the Secondary, size: " << data_to_send.size(); + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_sendFirmwareReq); + + auto m = req->sendFirmwareReq(); + SetString(&m->firmware, data_to_send); + auto resp = Asn1Rpc(req, getAddr()); + + if (resp->present() != AKIpUptaneMes_PR_sendFirmwareResp) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to receive firmware."; + return data::InstallationResult( + 
data::ResultCode::Numeric::kDownloadFailed, + "Secondary " + getSerial().ToString() + " failed to respond to a request to receive firmware."); + } + + auto r = resp->sendFirmwareResp(); + if (r->result == AKInstallationResult_success) { + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, ""); +} + +data::InstallationResult IpUptaneSecondary::sendFirmware_v2(const Uptane::Target& target) { + LOG_INFO << "Instructing Secondary " << getSerial() << " to receive target " << target.filename(); + if (target.IsOstree()) { + return downloadOstreeRev(target); + } else { + return uploadFirmware(target); + } +} + +data::InstallationResult IpUptaneSecondary::install(const Uptane::Target& target) { + data::InstallationResult install_result; + if (protocol_version == 2) { + install_result = install_v2(target); + } else if (protocol_version == 1) { + install_result = install_v1(target); + } else { + LOG_ERROR << "Unexpected protocol version: " << protocol_version; + install_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unexpected protocol version: " + std::to_string(protocol_version)); + } + + return install_result; +} + +data::InstallationResult IpUptaneSecondary::install_v1(const Uptane::Target& target) { + LOG_INFO << "Invoking an installation of the target on the Secondary: " << target.filename(); + + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_installReq); + + // prepare request message + auto req_mes = req->installReq(); + SetString(&req_mes->hash, target.filename()); + // send request and receive response, a request-response type of RPC + auto resp = Asn1Rpc(req, getAddr()); + + // invalid type of a response message + if (resp->present() != AKIpUptaneMes_PR_installResp) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to an installation request."; + return data::InstallationResult( + 
data::ResultCode::Numeric::kInternalError, + "Secondary " + getSerial().ToString() + " failed to respond to an installation request."); + } + + // deserialize the response message + auto r = resp->installResp(); + + return data::InstallationResult(static_cast(r->result), ""); +} + +data::InstallationResult IpUptaneSecondary::install_v2(const Uptane::Target& target) { + LOG_INFO << "Instructing Secondary " << getSerial() << " to install target " << target.filename(); + return invokeInstallOnSecondary(target); +} + +data::InstallationResult IpUptaneSecondary::downloadOstreeRev(const Uptane::Target& target) { + LOG_INFO << "Instructing Secondary " << getSerial() << " to download OSTree commit " << target.sha256Hash(); + const std::string tls_creds = secondary_provider_->getTreehubCredentials(); + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(static_cast(AKIpUptaneMes_PR_downloadOstreeRevReq)); + + auto m = req->downloadOstreeRevReq(); + SetString(&m->tlsCred, tls_creds); + auto resp = Asn1Rpc(req, getAddr()); + + if (resp->present() != AKIpUptaneMes_PR_downloadOstreeRevResp) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to download an OSTree commit."; + return data::InstallationResult( + data::ResultCode::Numeric::kUnknown, + "Secondary " + getSerial().ToString() + " failed to respond to a request to download an OSTree commit."); + } + + auto r = resp->downloadOstreeRevResp(); + return data::InstallationResult(static_cast(r->result), ToString(r->description)); +} + +data::InstallationResult IpUptaneSecondary::uploadFirmware(const Uptane::Target& target) { + LOG_INFO << "Uploading the target image (" << target.filename() << ") " + << "to the Secondary (" << getSerial() << ")"; + + auto upload_result = data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, ""); + + auto image_reader = secondary_provider_->getTargetFileHandle(target); + + uint64_t image_size = target.length(); + const size_t size = 1024; + 
size_t total_send_data = 0; + std::array buf{}; + auto upload_data_result = data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + + while (total_send_data < image_size && upload_data_result.isSuccess()) { + image_reader.read(reinterpret_cast(buf.data()), buf.size()); + upload_data_result = uploadFirmwareData(buf.data(), static_cast(image_reader.gcount())); + total_send_data += static_cast(image_reader.gcount()); + } + if (upload_data_result.isSuccess() && total_send_data == image_size) { + upload_result = data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } else if (!upload_data_result.isSuccess()) { + upload_result = upload_data_result; + } else { + upload_result = data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, "Incomplete upload"); + } + image_reader.close(); + return upload_result; +} + +data::InstallationResult IpUptaneSecondary::uploadFirmwareData(const uint8_t* data, size_t size) { + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_uploadDataReq); + + auto m = req->uploadDataReq(); + OCTET_STRING_fromBuf(&m->data, reinterpret_cast(data), static_cast(size)); + auto resp = Asn1Rpc(req, getAddr()); + + if (resp->present() == AKIpUptaneMes_PR_NOTHING) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to a request to receive firmware data."; + return data::InstallationResult( + data::ResultCode::Numeric::kUnknown, + "Secondary " + getSerial().ToString() + " failed to respond to a request to receive firmware data."); + } + if (resp->present() != AKIpUptaneMes_PR_uploadDataResp) { + LOG_ERROR << "Secondary " << getSerial() << " returned an invalid response to a request to receive firmware data."; + return data::InstallationResult( + data::ResultCode::Numeric::kInternalError, + "Secondary " + getSerial().ToString() + " returned an invalid response to a request to receive firmware data."); + } + + auto r = resp->uploadDataResp(); + return 
data::InstallationResult(static_cast(r->result), ToString(r->description)); +} + +data::InstallationResult IpUptaneSecondary::invokeInstallOnSecondary(const Uptane::Target& target) { + Asn1Message::Ptr req(Asn1Message::Empty()); + req->present(AKIpUptaneMes_PR_installReq); + + // prepare request message + auto req_mes = req->installReq(); + SetString(&req_mes->hash, target.filename()); + // send request and receive response, a request-response type of RPC + auto resp = Asn1Rpc(req, getAddr()); + + // invalid type of an response message + if (resp->present() != AKIpUptaneMes_PR_installResp2) { + LOG_ERROR << "Secondary " << getSerial() << " failed to respond to an installation request."; + return data::InstallationResult( + data::ResultCode::Numeric::kUnknown, + "Secondary " + getSerial().ToString() + " failed to respond to an installation request."); + } + + // deserialize the response message + auto r = resp->installResp2(); + return data::InstallationResult(static_cast(r->result), ToString(r->description)); +} + } // namespace Uptane diff --git a/src/libaktualizr-posix/ipuptanesecondary.h b/src/libaktualizr-posix/ipuptanesecondary.h index 68e423d0cb..b99a0387f1 100644 --- a/src/libaktualizr-posix/ipuptanesecondary.h +++ b/src/libaktualizr-posix/ipuptanesecondary.h @@ -1,46 +1,67 @@ #ifndef UPTANE_IPUPTANESECONDARY_H_ #define UPTANE_IPUPTANESECONDARY_H_ -#include -#include +#include "libaktualizr/secondaryinterface.h" +#include "libaktualizr/types.h" -#include "uptane/secondaryinterface.h" +struct AKMetaCollection; +using AKMetaCollection_t = struct AKMetaCollection; namespace Uptane { class IpUptaneSecondary : public SecondaryInterface { public: - static std::pair> connectAndCreate(const std::string& address, - unsigned short port); + static SecondaryInterface::Ptr connectAndCreate(const std::string& address, unsigned short port, + VerificationType verification_type); + static SecondaryInterface::Ptr create(const std::string& address, unsigned short port, + 
VerificationType verification_type, int con_fd); - static std::pair> create(const std::string& address, - unsigned short port, int con_fd); + static SecondaryInterface::Ptr connectAndCheck(const std::string& address, unsigned short port, + VerificationType verification_type, EcuSerial serial, + HardwareIdentifier hw_id, PublicKey pub_key); - explicit IpUptaneSecondary(const std::string& address, unsigned short port, EcuSerial serial, - HardwareIdentifier hw_id, PublicKey pub_key); + explicit IpUptaneSecondary(const std::string& address, unsigned short port, VerificationType verification_type, + EcuSerial serial, HardwareIdentifier hw_id, PublicKey pub_key); - // It looks more natural to return const EcuSerial& and const Uptane::HardwareIdentifier& - // and they should be 'const' methods - EcuSerial getSerial() /*const*/ override { return serial_; }; - Uptane::HardwareIdentifier getHwId() /*const*/ override { return hw_id_; } - PublicKey getPublicKey() /*const*/ override { return pub_key_; } + std::string Type() const override { return "IP"; } + EcuSerial getSerial() const override { return serial_; }; + Uptane::HardwareIdentifier getHwId() const override { return hw_id_; } + PublicKey getPublicKey() const override { return pub_key_; } - bool putMetadata(const RawMetaPack& meta_pack) override; - int32_t getRootVersion(bool /* director */) override { return 0; } - bool putRoot(const std::string& /* root */, bool /* director */) override { return true; } - bool sendFirmware(const std::shared_ptr& data) override; - Json::Value getManifest() override; + void init(std::shared_ptr secondary_provider_in) override { + secondary_provider_ = std::move(secondary_provider_in); + } + data::InstallationResult putMetadata(const Target& target) override; + int32_t getRootVersion(bool director) const override; + data::InstallationResult putRoot(const std::string& root, bool director) override; + Manifest getManifest() const override; + bool ping() const override; + 
data::InstallationResult sendFirmware(const Uptane::Target& target) override; + data::InstallationResult install(const Uptane::Target& target) override; private: const std::pair& getAddr() const { return addr_; } + void getSecondaryVersion() const; + data::InstallationResult putMetadata_v1(const Uptane::MetaBundle& meta_bundle); + data::InstallationResult putMetadata_v2(const Uptane::MetaBundle& meta_bundle); + data::InstallationResult sendFirmware_v1(const Uptane::Target& target); + data::InstallationResult sendFirmware_v2(const Uptane::Target& target); + data::InstallationResult install_v1(const Uptane::Target& target); + data::InstallationResult install_v2(const Uptane::Target& target); + static void addMetadata(const Uptane::MetaBundle& meta_bundle, Uptane::RepositoryType repo, const Uptane::Role& role, + AKMetaCollection_t& collection); + data::InstallationResult invokeInstallOnSecondary(const Uptane::Target& target); + data::InstallationResult downloadOstreeRev(const Uptane::Target& target); + data::InstallationResult uploadFirmware(const Uptane::Target& target); + data::InstallationResult uploadFirmwareData(const uint8_t* data, size_t size); - private: - std::mutex install_mutex; - - std::pair addr_; + std::shared_ptr secondary_provider_; + const std::pair addr_; + const VerificationType verification_type_; const EcuSerial serial_; const HardwareIdentifier hw_id_; const PublicKey pub_key_; + mutable uint32_t protocol_version{0}; }; } // namespace Uptane diff --git a/src/libaktualizr/CMakeLists.txt b/src/libaktualizr/CMakeLists.txt index f6163b9383..5e5a3e088a 100644 --- a/src/libaktualizr/CMakeLists.txt +++ b/src/libaktualizr/CMakeLists.txt @@ -1,25 +1,42 @@ +set(LIBAKTUALIZR_PUBLIC_HEADERS + ../../include/libaktualizr/config.h + ../../include/libaktualizr/types.h + ../../include/libaktualizr/events.h + ../../include/libaktualizr/results.h + ../../include/libaktualizr/campaign.h + ../../include/libaktualizr/secondaryinterface.h + 
../../include/libaktualizr/secondary_provider.h + ../../include/libaktualizr/packagemanagerinterface.h + ../../include/libaktualizr/packagemanagerfactory.h) + +aktualizr_source_file_checks(${LIBAKTUALIZR_PUBLIC_HEADERS}) + +# note: the config object is composed with multiple sub-config objects that live +# close to the modules they correspond to. To make the config module as +# self-contained as possible, the method definitions of these sub objects are +# also added to the module with CMake `target_sources(config PRIVATE ...)` +# declarations. # config has to go first, as other libraries append sources to it -add_subdirectory("config") +add_library(config OBJECT) -add_subdirectory("utilities") add_subdirectory("bootloader") add_subdirectory("bootstrap") add_subdirectory("campaign") +add_subdirectory("config") add_subdirectory("crypto") add_subdirectory("http") -add_subdirectory("primary") -add_subdirectory("uptane") add_subdirectory("logging") -add_subdirectory("storage") -add_subdirectory("socket_activation") add_subdirectory("package_manager") +add_subdirectory("primary") +add_subdirectory("storage") add_subdirectory("telemetry") +add_subdirectory("uptane") +add_subdirectory("utilities") -if(BUILD_ISOTP) - add_subdirectory("isotp_conn") -endif(BUILD_ISOTP) - +# deprecated, we recommend using aktualizr_lib add_library(aktualizr_static_lib STATIC + $ + $ $ $ $ @@ -27,17 +44,37 @@ add_library(aktualizr_static_lib STATIC $ $ $ + $ $ + $ $ - $ + $ + $ + $) + +add_library(aktualizr_lib SHARED + $ + $ + $ + $ + $ + $ + $ + $ + $ $ + $ + $ + $ $ $ - $) + $) -if (BUILD_ISOTP) - target_sources(aktualizr_static_lib PRIVATE $) -endif (BUILD_ISOTP) +target_link_libraries(aktualizr_lib ${AKTUALIZR_EXTERNAL_LIBS}) +set_target_properties(aktualizr_lib PROPERTIES LIBRARY_OUTPUT_NAME aktualizr) +install(TARGETS aktualizr_lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT aktualizr) +install(DIRECTORY ${PROJECT_SOURCE_DIR}/include/libaktualizr DESTINATION 
${CMAKE_INSTALL_INCLUDEDIR}) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/../../jsoncpp/json/json.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libaktualizr/json) -target_include_directories(aktualizr_static_lib PUBLIC - $) +configure_file(aktualizr.pc.in aktualizr.pc @ONLY) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/aktualizr.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) diff --git a/src/libaktualizr/aktualizr.pc.in b/src/libaktualizr/aktualizr.pc.in new file mode 100644 index 0000000000..1643bd565e --- /dev/null +++ b/src/libaktualizr/aktualizr.pc.in @@ -0,0 +1,10 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +includedir=${prefix}/include/libaktualizr +libdir=${exec_prefix}/lib + +Name: libaktualizr +Description: The libaktualizr library +Version: @AKTUALIZR_VERSION@ +Cflags: -I${includedir} +Libs: -L${libdir} -laktualizr diff --git a/src/libaktualizr/bootloader/CMakeLists.txt b/src/libaktualizr/bootloader/CMakeLists.txt index eb67e8f279..9c02c48fcc 100644 --- a/src/libaktualizr/bootloader/CMakeLists.txt +++ b/src/libaktualizr/bootloader/CMakeLists.txt @@ -1,4 +1,4 @@ -set(HEADERS bootloader.h bootloader_config.h) +set(HEADERS bootloader.h) set(SOURCES bootloader.cc) add_library(bootloader OBJECT ${SOURCES}) diff --git a/src/libaktualizr/bootloader/bootloader.cc b/src/libaktualizr/bootloader/bootloader.cc index 62b33919fe..45c1525a06 100644 --- a/src/libaktualizr/bootloader/bootloader.cc +++ b/src/libaktualizr/bootloader/bootloader.cc @@ -1,16 +1,17 @@ -#include -#include +#include "bootloader.h" +#include #include +#include #include -#include +#include -#include "bootloader.h" +#include "storage/invstorage.h" #include "utilities/exceptions.h" #include "utilities/utils.h" -Bootloader::Bootloader(const BootloaderConfig& config, INvStorage& storage) : config_(config), storage_(storage) { +Bootloader::Bootloader(BootloaderConfig config, INvStorage& storage) : config_(std::move(config)), storage_(storage) { reboot_sentinel_ = config_.reboot_sentinel_dir / 
config_.reboot_sentinel_name; reboot_command_ = config_.reboot_command; @@ -41,6 +42,14 @@ void Bootloader::setBootOK() const { LOG_WARNING << "Failed resetting upgrade_available for u-boot"; } break; + case RollbackMode::kFioVB: + if (Utils::shell("fiovb_setenv bootcount 0", &sink) != 0) { + LOG_WARNING << "Failed resetting bootcount"; + } + if (Utils::shell("fiovb_setenv upgrade_available 0", &sink) != 0) { + LOG_WARNING << "Failed resetting upgrade_available"; + } + break; default: throw NotImplementedException(); } @@ -70,6 +79,17 @@ void Bootloader::updateNotify() const { LOG_WARNING << "Failed resetting rollback flag"; } break; + case RollbackMode::kFioVB: + if (Utils::shell("fiovb_setenv bootcount 0", &sink) != 0) { + LOG_WARNING << "Failed resetting bootcount"; + } + if (Utils::shell("fiovb_setenv upgrade_available 1", &sink) != 0) { + LOG_WARNING << "Failed setting upgrade_available"; + } + if (Utils::shell("fiovb_setenv rollback 0", &sink) != 0) { + LOG_WARNING << "Failed resetting rollback flag"; + } + break; default: throw NotImplementedException(); } @@ -114,8 +134,8 @@ void Bootloader::rebootFlagClear() { boost::filesystem::remove(reboot_sentinel_); } -void Bootloader::reboot(bool fake_reboot) { - if (fake_reboot) { +void Bootloader::reboot(bool fake) { + if (fake) { boost::filesystem::remove(reboot_sentinel_); return; } diff --git a/src/libaktualizr/bootloader/bootloader.h b/src/libaktualizr/bootloader/bootloader.h index a1ae59b150..7fedb019d7 100644 --- a/src/libaktualizr/bootloader/bootloader.h +++ b/src/libaktualizr/bootloader/bootloader.h @@ -1,15 +1,21 @@ #ifndef BOOTLOADER_H_ #define BOOTLOADER_H_ -#include "bootloader_config.h" +#include "libaktualizr/config.h" -#include "storage/invstorage.h" +class INvStorage; class Bootloader { public: - Bootloader(const BootloaderConfig& config, INvStorage& storage); - void setBootOK() const; - void updateNotify() const; + Bootloader(BootloaderConfig config, INvStorage& storage); + virtual ~Bootloader() = 
default; + Bootloader(const Bootloader&) = delete; + Bootloader(Bootloader&&) = delete; + Bootloader& operator=(const Bootloader&) = delete; + Bootloader& operator=(Bootloader&&) = delete; + virtual void setBootOK() const; + virtual void updateNotify() const; + virtual void installNotify(const Uptane::Target& target) const { (void)target; } // Reboot handling (uses storage) // @@ -22,11 +28,12 @@ class Bootloader { bool rebootDetected() const; void rebootFlagSet(); void rebootFlagClear(); - void reboot(bool fake_reboot = false); + void reboot(bool fake = false); - private: - const BootloaderConfig& config_; + protected: + const BootloaderConfig config_; + private: INvStorage& storage_; boost::filesystem::path reboot_sentinel_; std::string reboot_command_; diff --git a/src/libaktualizr/bootloader/bootloader_config.cc b/src/libaktualizr/bootloader/bootloader_config.cc index 56ab406da5..09be0bc274 100644 --- a/src/libaktualizr/bootloader/bootloader_config.cc +++ b/src/libaktualizr/bootloader/bootloader_config.cc @@ -1,4 +1,4 @@ -#include "bootloader_config.h" +#include "libaktualizr/config.h" #include "utilities/config_utils.h" std::ostream& operator<<(std::ostream& os, RollbackMode mode) { @@ -10,6 +10,9 @@ std::ostream& operator<<(std::ostream& os, RollbackMode mode) { case RollbackMode::kUbootMasked: mode_s = "uboot_masked"; break; + case RollbackMode::kFioVB: + mode_s = "fiovb"; + break; default: mode_s = "none"; break; @@ -27,6 +30,8 @@ inline void CopyFromConfig(RollbackMode& dest, const std::string& option_name, c dest = RollbackMode::kUbootGeneric; } else if (mode == "uboot_masked") { dest = RollbackMode::kUbootMasked; + } else if (mode == "fiovb") { + dest = RollbackMode::kFioVB; } else { dest = RollbackMode::kBootloaderNone; } diff --git a/src/libaktualizr/bootloader/bootloader_config.h b/src/libaktualizr/bootloader/bootloader_config.h deleted file mode 100644 index 797da537dc..0000000000 --- a/src/libaktualizr/bootloader/bootloader_config.h +++ /dev/null @@ 
-1,21 +0,0 @@ -#ifndef BOOTLOADER_CONFIG_H_ -#define BOOTLOADER_CONFIG_H_ - -#include -#include -#include - -enum class RollbackMode { kBootloaderNone = 0, kUbootGeneric, kUbootMasked }; -std::ostream& operator<<(std::ostream& os, RollbackMode mode); - -struct BootloaderConfig { - RollbackMode rollback_mode{RollbackMode::kBootloaderNone}; - boost::filesystem::path reboot_sentinel_dir{"/var/run/aktualizr-session"}; - boost::filesystem::path reboot_sentinel_name{"need_reboot"}; - std::string reboot_command{"/sbin/reboot"}; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -#endif // BOOTLOADER_CONFIG_H_ diff --git a/src/libaktualizr/bootloader/bootloader_test.cc b/src/libaktualizr/bootloader/bootloader_test.cc index a29b9a274a..3af83ab692 100644 --- a/src/libaktualizr/bootloader/bootloader_test.cc +++ b/src/libaktualizr/bootloader/bootloader_test.cc @@ -2,6 +2,11 @@ #include "bootloader.h" +#include + +#include "storage/invstorage.h" +#include "utilities/utils.h" + /* Check that the reboot detection feature works */ TEST(bootloader, detectReboot) { TemporaryDirectory temp_dir; diff --git a/src/libaktualizr/bootstrap/bootstrap.cc b/src/libaktualizr/bootstrap/bootstrap.cc index e883613054..b48196c56e 100644 --- a/src/libaktualizr/bootstrap/bootstrap.cc +++ b/src/libaktualizr/bootstrap/bootstrap.cc @@ -1,6 +1,5 @@ #include "bootstrap.h" -#include #include #include @@ -8,22 +7,21 @@ #include "logging/logging.h" #include "utilities/utils.h" -Bootstrap::Bootstrap(const boost::filesystem::path& provision_path, const std::string& provision_password) - : ca_(""), cert_(""), pkey_("") { +Bootstrap::Bootstrap(const boost::filesystem::path& provision_path, const std::string& provision_password) { if (provision_path.empty()) { LOG_ERROR << "Provision path is empty!"; - throw std::runtime_error("Unable to parse bootstrap credentials"); + throw std::runtime_error("Unable to parse bootstrap (shared) 
credentials"); } std::ifstream as(provision_path.c_str(), std::ios::in | std::ios::binary); if (as.fail()) { - LOG_ERROR << "Unable to open provided provision archive " << provision_path << ": " << std::strerror(errno); - throw std::runtime_error("Unable to parse bootstrap credentials"); + LOG_ERROR << "Unable to open provided provisioning archive " << provision_path << ": " << std::strerror(errno); + throw std::runtime_error("Unable to parse bootstrap (shared) credentials"); } std::string p12_str = Utils::readFileFromArchive(as, "autoprov_credentials.p12"); if (p12_str.empty()) { - throw std::runtime_error("Unable to parse bootstrap credentials"); + throw std::runtime_error("Unable to parse bootstrap (shared) credentials"); } readTlsP12(p12_str, provision_password, pkey_, cert_, ca_); @@ -48,12 +46,12 @@ std::string Bootstrap::readServerUrl(const boost::filesystem::path& provision_pa try { std::ifstream as(provision_path.c_str(), std::ios::in | std::ios::binary); if (as.fail()) { - LOG_ERROR << "Unable to open provided provision archive " << provision_path << ": " << std::strerror(errno); + LOG_ERROR << "Unable to open provided provisioning archive " << provision_path << ": " << std::strerror(errno); throw std::runtime_error("Unable to parse bootstrap credentials"); } url = Utils::readFileFromArchive(as, "autoprov.url", true); } catch (std::runtime_error& exc) { - LOG_ERROR << "Unable to read server url from archive: " << exc.what(); + LOG_ERROR << "Unable to read server URL from archive: " << exc.what(); url = ""; } @@ -65,12 +63,12 @@ std::string Bootstrap::readServerCa(const boost::filesystem::path& provision_pat try { std::ifstream as(provision_path.c_str(), std::ios::in | std::ios::binary); if (as.fail()) { - LOG_ERROR << "Unable to open provided provision archive " << provision_path << ": " << std::strerror(errno); + LOG_ERROR << "Unable to open provided provisioning archive " << provision_path << ": " << std::strerror(errno); throw 
std::runtime_error("Unable to parse bootstrap credentials"); } server_ca = Utils::readFileFromArchive(as, "server_ca.pem"); } catch (std::runtime_error& exc) { - LOG_ERROR << "Unable to read server ca from archive: " << exc.what(); + LOG_ERROR << "Unable to read server CA certificate from archive: " << exc.what(); return ""; } diff --git a/src/libaktualizr/bootstrap/bootstrap.h b/src/libaktualizr/bootstrap/bootstrap.h index 0726b1a6b1..e26ebb8cd7 100644 --- a/src/libaktualizr/bootstrap/bootstrap.h +++ b/src/libaktualizr/bootstrap/bootstrap.h @@ -1,7 +1,7 @@ #ifndef AKTUALIZR_BOOTSTRAP_H #define AKTUALIZR_BOOTSTRAP_H -#include +#include #include class Bootstrap { diff --git a/src/libaktualizr/campaign/CMakeLists.txt b/src/libaktualizr/campaign/CMakeLists.txt index 51f9b30e2e..aa216867d5 100644 --- a/src/libaktualizr/campaign/CMakeLists.txt +++ b/src/libaktualizr/campaign/CMakeLists.txt @@ -1,8 +1,7 @@ set(SOURCES campaign.cc) -set(HEADERS campaign.h) add_library(campaign OBJECT ${SOURCES}) add_aktualizr_test(NAME campaign SOURCES campaign_test.cc ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test) -aktualizr_source_file_checks(${SOURCES} ${HEADERS} ${TEST_SOURCES}) +aktualizr_source_file_checks(${SOURCES} ${TEST_SOURCES}) diff --git a/src/libaktualizr/campaign/campaign.cc b/src/libaktualizr/campaign/campaign.cc index e292228f6d..b2f6599cc8 100644 --- a/src/libaktualizr/campaign/campaign.cc +++ b/src/libaktualizr/campaign/campaign.cc @@ -1,29 +1,28 @@ -#include "campaign/campaign.h" +#include "libaktualizr/campaign.h" +#include "http/httpclient.h" +#include "utilities/utils.h" namespace campaign { -Campaign Campaign::fromJson(const Json::Value &json) { +Campaign::Campaign(const Json::Value &json) { try { if (!json.isObject()) { throw CampaignParseError(); } - std::string id = json["id"].asString(); + id = json["id"].asString(); if (id.empty()) { throw CampaignParseError(); } - std::string name = json["name"].asString(); + name = json["name"].asString(); if (name.empty()) { throw 
CampaignParseError(); } - int64_t size = json.get("size", 0).asInt64(); - bool autoAccept = json.get("autoAccept", false).asBool(); + size = json.get("size", 0).asInt64(); + autoAccept = json.get("autoAccept", false).asBool(); - std::string description; - int estInstallationDuration = 0; - int estPreparationDuration = 0; for (const auto &o : json["metadata"]) { if (!o.isObject()) { continue; @@ -47,7 +46,6 @@ Campaign Campaign::fromJson(const Json::Value &json) { } } - return {id, name, size, autoAccept, description, estInstallationDuration, estPreparationDuration}; } catch (const std::runtime_error &exc) { LOG_ERROR << exc.what(); throw CampaignParseError(); @@ -56,7 +54,23 @@ Campaign Campaign::fromJson(const Json::Value &json) { } } -std::vector campaignsFromJson(const Json::Value &json) { +void Campaign::getJson(Json::Value &out) const { + out.clear(); + + out["id"] = id; + out["name"] = name; + out["size"] = Json::UInt(size); + out["autoAccept"] = autoAccept; + + out["metadata"][0]["type"] = "DESCRIPTION"; + out["metadata"][0]["value"] = description; + out["metadata"][1]["type"] = "ESTIMATED_INSTALLATION_DURATION"; + out["metadata"][1]["value"] = std::to_string(estInstallationDuration); + out["metadata"][2]["type"] = "ESTIMATED_PREPARATION_DURATION"; + out["metadata"][2]["value"] = std::to_string(estPreparationDuration); +} + +std::vector Campaign::campaignsFromJson(const Json::Value &json) { std::vector campaigns; Json::Value campaigns_array; @@ -75,7 +89,7 @@ std::vector campaignsFromJson(const Json::Value &json) { for (const auto &c : campaigns_array) { try { - campaigns.push_back(Campaign::fromJson(c)); + campaigns.emplace_back(Campaign(c)); } catch (const CampaignParseError &exc) { LOG_ERROR << "Error parsing " << c << ": " << exc.what(); } @@ -83,7 +97,18 @@ std::vector campaignsFromJson(const Json::Value &json) { return campaigns; } -std::vector fetchAvailableCampaigns(HttpInterface &http_client, const std::string &tls_server) { +void 
Campaign::JsonFromCampaigns(const std::vector &in, Json::Value &out) { + out.clear(); + auto i = 0; + Json::Value json; + for (const auto &c : in) { + c.getJson(json); + out["campaigns"][i] = json; + ++i; + } +} + +std::vector Campaign::fetchAvailableCampaigns(HttpInterface &http_client, const std::string &tls_server) { HttpResponse response = http_client.get(tls_server + "/campaigner/campaigns", kMaxCampaignsMetaSize); if (!response.isOk()) { LOG_ERROR << "Failed to fetch list of available campaigns"; diff --git a/src/libaktualizr/campaign/campaign.h b/src/libaktualizr/campaign/campaign.h deleted file mode 100644 index 7334d0624f..0000000000 --- a/src/libaktualizr/campaign/campaign.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef CAMPAIGN_CAMPAIGN_H_ -#define CAMPAIGN_CAMPAIGN_H_ - -#include -#include -#include "http/httpclient.h" -#include "utilities/utils.h" - -namespace campaign { - -constexpr int64_t kMaxCampaignsMetaSize = 1024 * 1024; - -class CampaignParseError : std::exception { - public: - const char *what() const noexcept override { return "Could not parse Campaign metadata"; } -}; - -enum class Cmd { - Accept, - Decline, - Postpone, -}; - -static inline Cmd cmdFromName(const std::string &name) { - return std::map{ - {"campaign_accept", Cmd::Accept}, {"campaign_decline", Cmd::Decline}, {"campaign_postpone", Cmd::Postpone}} - .at(name); -} - -// Out of uptane concept: update campaign for a device -class Campaign { - public: - static Campaign fromJson(const Json::Value &json); - - std::string id; - std::string name; - int64_t size; - bool autoAccept; - std::string description; - int estInstallationDuration; - int estPreparationDuration; -}; - -std::vector campaignsFromJson(const Json::Value &json); -std::vector fetchAvailableCampaigns(HttpInterface &http_client, const std::string &tls_server); -} // namespace campaign - -#endif diff --git a/src/libaktualizr/campaign/campaign_test.cc b/src/libaktualizr/campaign/campaign_test.cc index 6ec9ea2ec4..ba31893f69 100644 
--- a/src/libaktualizr/campaign/campaign_test.cc +++ b/src/libaktualizr/campaign/campaign_test.cc @@ -1,8 +1,9 @@ -#include +#include +#include -#include "campaign/campaign.h" +#include "libaktualizr/campaign.h" -#include +#include "utilities/utils.h" boost::filesystem::path test_data_dir; @@ -10,7 +11,7 @@ boost::filesystem::path test_data_dir; TEST(campaign, Campaigns_from_json) { auto json = Utils::parseJSONFile(test_data_dir / "campaigns_sample.json"); - auto campaigns = campaign::campaignsFromJson(json); + auto campaigns = campaign::Campaign::campaignsFromJson(json); EXPECT_EQ(campaigns.size(), 1); EXPECT_EQ(campaigns.at(0).name, "campaign1"); @@ -27,33 +28,53 @@ TEST(campaign, Campaigns_from_json) { bad4["campaigns"][0] = Json::Value(); bad4["campaigns"][0]["name"] = "a"; bad4["campaigns"][0]["id"] = "a"; - auto campaignsNoAutoAccept = campaign::campaignsFromJson(bad4); + auto campaignsNoAutoAccept = campaign::Campaign::campaignsFromJson(bad4); EXPECT_FALSE(campaignsNoAutoAccept.at(0).autoAccept); } +/* Get JSON from campaign. 
*/ +TEST(campaign, Campaigns_to_json) { + auto json = Utils::parseJSONFile(test_data_dir / "campaigns_sample.json"); + + auto campaigns = campaign::Campaign::campaignsFromJson(json); + Json::Value res; + campaign::Campaign::JsonFromCampaigns(campaigns, res); + + EXPECT_EQ(res["campaigns"][0]["name"], "campaign1"); + EXPECT_EQ(res["campaigns"][0]["id"], "c2eb7e8d-8aa0-429d-883f-5ed8fdb2a493"); + EXPECT_EQ((res["campaigns"][0]["size"]).asInt64(), 62470); + EXPECT_EQ(res["campaigns"][0]["autoAccept"], true); + EXPECT_EQ(res["campaigns"][0]["metadata"][0]["type"], "DESCRIPTION"); + EXPECT_EQ(res["campaigns"][0]["metadata"][0]["value"], "this is my message to show on the device"); + EXPECT_EQ(res["campaigns"][0]["metadata"][1]["type"], "ESTIMATED_INSTALLATION_DURATION"); + EXPECT_EQ(res["campaigns"][0]["metadata"][1]["value"], "10"); + EXPECT_EQ(res["campaigns"][0]["metadata"][2]["type"], "ESTIMATED_PREPARATION_DURATION"); + EXPECT_EQ(res["campaigns"][0]["metadata"][2]["value"], "20"); +} + TEST(campaign, Campaigns_from_invalid_json) { // empty object - EXPECT_EQ(campaign::campaignsFromJson(Json::Value()).size(), 0); + EXPECT_EQ(campaign::Campaign::campaignsFromJson(Json::Value()).size(), 0); // naked array - EXPECT_EQ(campaign::campaignsFromJson(Json::Value(Json::arrayValue)).size(), 0); + EXPECT_EQ(campaign::Campaign::campaignsFromJson(Json::Value(Json::arrayValue)).size(), 0); // object in object Json::Value bad1; bad1["campaigns"] = Json::Value(); - EXPECT_EQ(campaign::campaignsFromJson(bad1).size(), 0); + EXPECT_EQ(campaign::Campaign::campaignsFromJson(bad1).size(), 0); // array in array in object Json::Value bad2; bad2["campaigns"] = Json::Value(Json::arrayValue); bad2["campaigns"][0] = Json::Value(Json::arrayValue); - EXPECT_EQ(campaign::campaignsFromJson(bad2).size(), 0); + EXPECT_EQ(campaign::Campaign::campaignsFromJson(bad2).size(), 0); // no name Json::Value bad3; bad3["campaigns"] = Json::Value(Json::arrayValue); bad3["campaigns"][0] = Json::Value(); - 
EXPECT_EQ(campaign::campaignsFromJson(bad3).size(), 0); + EXPECT_EQ(campaign::Campaign::campaignsFromJson(bad3).size(), 0); } #ifndef __NO_MAIN__ diff --git a/src/libaktualizr/config/CMakeLists.txt b/src/libaktualizr/config/CMakeLists.txt index 2468f85ab2..5a311dba7b 100644 --- a/src/libaktualizr/config/CMakeLists.txt +++ b/src/libaktualizr/config/CMakeLists.txt @@ -1,17 +1,12 @@ -set(HEADERS config.h) -set(SOURCES config.cc) -include(AddAktualizrTest) +set(SOURCES base_config.cc config.cc) -add_library(config OBJECT ${SOURCES}) -# note: the Config object is composed with multiple sub-config objects that live -# close to the modules their refer too. To make the config module as -# self-contained as possible, the method definitions of these sub objects are -# also added to the module with CMake `target_sources(config PRIVATE ...)` -# declarations. +target_sources(config PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/base_config.cc) + +add_library(primary_config OBJECT config.cc) add_aktualizr_test(NAME config SOURCES config_test.cc ARGS ${PROJECT_BINARY_DIR} PROJECT_WORKING_DIRECTORY) # config file test for collisions between import and FS->SQL migration paths add_test(NAME config-import COMMAND ${PROJECT_SOURCE_DIR}/tests/run_import_clash_test.sh ${PROJECT_SOURCE_DIR}/config) -aktualizr_source_file_checks(${SOURCES} ${HEADERS} config_test.cc) +aktualizr_source_file_checks(${SOURCES} config_test.cc) diff --git a/src/libaktualizr/config/base_config.cc b/src/libaktualizr/config/base_config.cc new file mode 100644 index 0000000000..c0414b807c --- /dev/null +++ b/src/libaktualizr/config/base_config.cc @@ -0,0 +1,64 @@ +#include "libaktualizr/config.h" + +#include + +#include "logging/logging.h" +#include "utilities/config_utils.h" +#include "utilities/utils.h" + +void BaseConfig::checkDirs(const std::vector& configs) { + for (const auto& config : configs) { + if (!boost::filesystem::exists(config)) { + throw std::runtime_error("Config directory " + config.string() + " does not 
exist."); + } + } +} + +void BaseConfig::updateFromToml(const boost::filesystem::path& filename) { + LOG_INFO << "Reading config: " << filename; + if (!boost::filesystem::exists(filename)) { + throw std::runtime_error("Config file " + filename.string() + " does not exist."); + } + boost::property_tree::ptree pt; + boost::property_tree::ini_parser::read_ini(filename.string(), pt); + updateFromPropertyTree(pt); +} + +void BaseConfig::updateFromDirs(const std::vector& configs) { + std::map configs_map; + for (const auto& config : configs) { + if (!boost::filesystem::exists(config)) { + continue; + } + if (boost::filesystem::is_directory(config)) { + for (const auto& config_file : Utils::getDirEntriesByExt(config, ".toml")) { + configs_map[config_file.filename().string()] = config_file; + } + } else { + configs_map[config.filename().string()] = config; + } + } + for (const auto& config_file : configs_map) { + updateFromToml(config_file.second); + } +} + +void P11Config::updateFromPropertyTree(const boost::property_tree::ptree& pt) { + CopyFromConfig(module, "module", pt); + CopyFromConfig(pass, "pass", pt); + CopyFromConfig(label, "label", pt); + CopyFromConfig(uptane_key_id, "uptane_key_id", pt); + CopyFromConfig(tls_cacert_id, "tls_cacert_id", pt); + CopyFromConfig(tls_pkey_id, "tls_pkey_id", pt); + CopyFromConfig(tls_clientcert_id, "tls_clientcert_id", pt); +} + +void P11Config::writeToStream(std::ostream& out_stream) const { + writeOption(out_stream, module, "module"); + writeOption(out_stream, pass, "pass"); + writeOption(out_stream, label, "label"); + writeOption(out_stream, uptane_key_id, "uptane_key_id"); + writeOption(out_stream, tls_cacert_id, "tls_ca_id"); + writeOption(out_stream, tls_pkey_id, "tls_pkey_id"); + writeOption(out_stream, tls_clientcert_id, "tls_clientcert_id"); +} diff --git a/src/libaktualizr/config/config.cc b/src/libaktualizr/config/config.cc index 771d7e1e0b..b67d3fddd3 100644 --- a/src/libaktualizr/config/config.cc +++ 
b/src/libaktualizr/config/config.cc @@ -1,15 +1,50 @@ -#include "config.h" - #include -#include -#include +#include #include #include "bootstrap/bootstrap.h" -#include "config.h" +#include "libaktualizr/config.h" +#include "utilities/config_utils.h" #include "utilities/exceptions.h" #include "utilities/utils.h" +std::ostream& operator<<(std::ostream& os, ProvisionMode mode) { + std::string mode_s; + switch (mode) { + case ProvisionMode::kSharedCred: + mode_s = "SharedCred"; + break; + case ProvisionMode::kDeviceCred: + mode_s = "DeviceCred"; + break; + case ProvisionMode::kSharedCredReuse: + mode_s = "SharedCredReuse"; + break; + default: + mode_s = "Default"; + break; + } + os << '"' << mode_s << '"'; + return os; +} + +template <> +inline void CopyFromConfig(ProvisionMode& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { + boost::optional value = pt.get_optional(option_name); + if (value.is_initialized()) { + std::string provision_mode{StripQuotesFromStrings(value.get())}; + if (provision_mode == "SharedCred") { + dest = ProvisionMode::kSharedCred; + } else if (provision_mode == "DeviceCred") { + dest = ProvisionMode::kDeviceCred; + } else if (provision_mode == "SharedCredReuse") { + dest = ProvisionMode::kSharedCredReuse; + } else { + dest = ProvisionMode::kDefault; + } + } +} + void TlsConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { CopyFromConfig(server, "server", pt); CopyFromConfig(server_url_path, "server_url_path", pt); @@ -35,7 +70,7 @@ void ProvisionConfig::updateFromPropertyTree(const boost::property_tree::ptree& CopyFromConfig(primary_ecu_serial, "primary_ecu_serial", pt); CopyFromConfig(primary_ecu_hardware_id, "primary_ecu_hardware_id", pt); CopyFromConfig(ecu_registration_endpoint, "ecu_registration_endpoint", pt); - // provision.mode is set in postUpdateValues. 
+ CopyFromConfig(mode, "mode", pt); } void ProvisionConfig::writeToStream(std::ostream& out_stream) const { @@ -47,7 +82,7 @@ void ProvisionConfig::writeToStream(std::ostream& out_stream) const { writeOption(out_stream, primary_ecu_serial, "primary_ecu_serial"); writeOption(out_stream, primary_ecu_hardware_id, "primary_ecu_hardware_id"); writeOption(out_stream, ecu_registration_endpoint, "ecu_registration_endpoint"); - // Skip provision.mode since it is dependent on other options. + writeOption(out_stream, mode, "mode"); } void UptaneConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { @@ -58,6 +93,7 @@ void UptaneConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) CopyFromConfig(key_type, "key_type", pt); CopyFromConfig(force_install_completion, "force_install_completion", pt); CopyFromConfig(secondary_config_file, "secondary_config_file", pt); + CopyFromConfig(secondary_preinstall_wait_sec, "secondary_preinstall_wait_sec", pt); } void UptaneConfig::writeToStream(std::ostream& out_stream) const { @@ -68,6 +104,7 @@ void UptaneConfig::writeToStream(std::ostream& out_stream) const { writeOption(out_stream, key_type, "key_type"); writeOption(out_stream, force_install_completion, "force_install_completion"); writeOption(out_stream, secondary_config_file, "secondary_config_file"); + writeOption(out_stream, secondary_preinstall_wait_sec, "secondary_preinstall_wait_sec"); } /** @@ -120,13 +157,17 @@ KeyManagerConfig Config::keymanagerConfig() const { void Config::postUpdateValues() { logger_set_threshold(logger); - provision.mode = provision.provision_path.empty() ? ProvisionMode::kDeviceCred : ProvisionMode::kSharedCred; + if (provision.mode == ProvisionMode::kDefault) { + provision.mode = provision.provision_path.empty() ? ProvisionMode::kDeviceCred : ProvisionMode::kSharedCred; + } else if (provision.mode == ProvisionMode::kSharedCredReuse) { + LOG_INFO << "Provisioning mode is set to reuse shared credentials. 
This should only be used for testing!"; + } if (tls.server.empty()) { if (!tls.server_url_path.empty()) { try { tls.server = Utils::readFile(tls.server_url_path, true); - } catch (const boost::filesystem::filesystem_error& e) { + } catch (const std::exception& e) { LOG_ERROR << "Couldn't read gateway URL: " << e.what(); tls.server = ""; } @@ -207,9 +248,6 @@ void Config::updateFromCommandLine(const boost::program_options::variables_map& if (cmd.count("director-server") != 0) { uptane.director_server = cmd["director-server"].as(); } - if (cmd.count("ostree-server") != 0) { - pacman.ostree_server = cmd["ostree-server"].as(); - } if (cmd.count("primary-ecu-serial") != 0) { provision.primary_ecu_serial = cmd["primary-ecu-serial"].as(); } diff --git a/src/libaktualizr/config/config.h b/src/libaktualizr/config/config.h deleted file mode 100644 index 1591145542..0000000000 --- a/src/libaktualizr/config/config.h +++ /dev/null @@ -1,109 +0,0 @@ -#ifndef CONFIG_H_ -#define CONFIG_H_ - -#include -#include -#include -#include - -#include -#include -#include -#include "bootloader/bootloader.h" -#include "crypto/keymanager_config.h" -#include "crypto/p11_config.h" -#include "logging/logging_config.h" -#include "package_manager/packagemanagerconfig.h" -#include "storage/storage_config.h" -#include "telemetry/telemetryconfig.h" -#include "utilities/config_utils.h" -#include "utilities/types.h" - -enum class ProvisionMode { kSharedCred = 0, kDeviceCred }; - -// Try to keep the order of config options the same as in Config::writeToStream() -// and Config::updateFromPropertyTree() in config.cc. 
- -struct TlsConfig { - std::string server; - boost::filesystem::path server_url_path; - CryptoSource ca_source{CryptoSource::kFile}; - CryptoSource pkey_source{CryptoSource::kFile}; - CryptoSource cert_source{CryptoSource::kFile}; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -struct ProvisionConfig { - std::string server; - std::string p12_password; - std::string expiry_days{"36000"}; - boost::filesystem::path provision_path; - ProvisionMode mode{ProvisionMode::kSharedCred}; - std::string device_id; - std::string primary_ecu_serial; - std::string primary_ecu_hardware_id; - std::string ecu_registration_endpoint; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -struct UptaneConfig { - uint64_t polling_sec{10u}; - std::string director_server; - std::string repo_server; - CryptoSource key_source{CryptoSource::kFile}; - KeyType key_type{KeyType::kRSA2048}; - bool force_install_completion{false}; - boost::filesystem::path secondary_config_file; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -/** - * Configuration object for an aktualizr instance running on a primary ECU. - * - * This class is a parent to a series of smaller configuration objects for - * specific subsystems. Note that most other aktualizr-related tools have their - * own parent configuration objects with a reduced set of members. 
- */ -class Config : public BaseConfig { - public: - Config(); - explicit Config(const boost::program_options::variables_map& cmd); - explicit Config(const boost::filesystem::path& filename); - explicit Config(const std::vector& config_dirs); - - KeyManagerConfig keymanagerConfig() const; - - void updateFromTomlString(const std::string& contents); - void postUpdateValues(); - void writeToStream(std::ostream& sink) const; - - // Config data structures. Keep logger first so that it is taken into account - // while processing the others. - LoggerConfig logger; - P11Config p11; - TlsConfig tls; - ProvisionConfig provision; - UptaneConfig uptane; - PackageConfig pacman; - StorageConfig storage; - ImportConfig import; - TelemetryConfig telemetry; - BootloaderConfig bootloader; - - private: - void updateFromPropertyTree(const boost::property_tree::ptree& pt) override; - void updateFromCommandLine(const boost::program_options::variables_map& cmd); - - std::vector config_dirs_ = {"/usr/lib/sota/conf.d", "/etc/sota/conf.d/"}; - bool loglevel_from_cmdline{false}; -}; - -std::ostream& operator<<(std::ostream& os, const Config& cfg); - -#endif // CONFIG_H_ diff --git a/src/libaktualizr/config/config_test.cc b/src/libaktualizr/config/config_test.cc index 2c3e7d2f91..562f378212 100644 --- a/src/libaktualizr/config/config_test.cc +++ b/src/libaktualizr/config/config_test.cc @@ -8,8 +8,8 @@ #include #include "bootstrap/bootstrap.h" -#include "config/config.h" #include "crypto/crypto.h" +#include "libaktualizr/config.h" #include "test_utils.h" #include "utilities/utils.h" @@ -19,30 +19,30 @@ boost::filesystem::path build_dir; TEST(config, DefaultValues) { Config conf; EXPECT_EQ(conf.uptane.key_type, KeyType::kRSA2048); - EXPECT_EQ(conf.uptane.polling_sec, 10u); + EXPECT_EQ(conf.uptane.polling_sec, 300u); } TEST(config, TomlBasic) { Config conf("tests/config/basic.toml"); - EXPECT_EQ(conf.pacman.type, PackageManager::kNone); + EXPECT_EQ(conf.pacman.type, PACKAGE_MANAGER_NONE); } 
TEST(config, TomlEmpty) { Config conf; conf.updateFromTomlString(""); EXPECT_EQ(conf.uptane.key_type, KeyType::kRSA2048); - EXPECT_EQ(conf.uptane.polling_sec, 10u); + EXPECT_EQ(conf.uptane.polling_sec, 300u); } TEST(config, TomlInt) { Config conf; - conf.updateFromTomlString("[uptane]\nkey_type = ED25519\npolling_sec = 99\n"); + conf.updateFromTomlString("[uptane]\nkey_type = \"ED25519\"\npolling_sec = 99\n"); EXPECT_EQ(conf.uptane.key_type, KeyType::kED25519); EXPECT_EQ(conf.uptane.polling_sec, 99u); } /* - * Check that user can specify primary serial via a config file. + * Check that user can specify Primary serial via a config file. */ TEST(config, TomlPrimarySerial) { RecordProperty("zephyr_key", "OTA-988"); @@ -51,7 +51,7 @@ TEST(config, TomlPrimarySerial) { } /* - * Check that user can specify primary serial on the command line. + * Check that user can specify Primary serial on the command line. */ TEST(config, CmdlPrimarySerial) { RecordProperty("zephyr_key", "OTA-988"); @@ -108,10 +108,18 @@ TEST(config, DeviceCredMode) { * Start in shared credential provisioning mode. */ TEST(config, SharedCredMode) { - Config config("tests/config/basic.toml"); + Config config("config/sota-local.toml"); EXPECT_EQ(config.provision.mode, ProvisionMode::kSharedCred); } +/** + * Start in shared credential provisioning mode with reuse. + */ +TEST(config, SharedCredReuseMode) { + Config config("tests/config/basic.toml"); + EXPECT_EQ(config.provision.mode, ProvisionMode::kSharedCredReuse); +} + /* Write config to file or to the log. * We don't normally dump the config to file anymore, but we do write it to the * log. 
*/ @@ -217,7 +225,7 @@ TEST(config, TwoDirs) { void checkConfigExpectations(const Config &conf) { EXPECT_EQ(conf.storage.type, StorageType::kSqlite); - EXPECT_EQ(conf.pacman.type, PackageManager::kNone); + EXPECT_EQ(conf.pacman.type, PACKAGE_MANAGER_NONE); EXPECT_EQ(conf.tls.ca_source, CryptoSource::kPkcs11); EXPECT_EQ(conf.tls.pkey_source, CryptoSource::kPkcs11); EXPECT_EQ(conf.tls.cert_source, CryptoSource::kPkcs11); diff --git a/src/libaktualizr/crypto/CMakeLists.txt b/src/libaktualizr/crypto/CMakeLists.txt index 173212e692..2c63a3118c 100644 --- a/src/libaktualizr/crypto/CMakeLists.txt +++ b/src/libaktualizr/crypto/CMakeLists.txt @@ -2,7 +2,6 @@ set(SOURCES crypto.cc keymanager.cc) set(HEADERS crypto.h - keymanager_config.h keymanager.h openssl_compat.h) @@ -13,15 +12,18 @@ aktualizr_source_file_checks(${SOURCES} ${HEADERS}) if(BUILD_P11) target_sources(crypto PRIVATE p11engine.cc) - if(TEST_PKCS11_MODULE_PATH) - add_definitions(-DTEST_PKCS11_MODULE_PATH="${TEST_PKCS11_MODULE_PATH}" -DTEST_PKCS11_ENGINE_PATH="${TEST_PKCS11_ENGINE_PATH}") - endif(TEST_PKCS11_MODULE_PATH) + if(PKCS11_ENGINE_PATH) + set_property(SOURCE p11engine.cc PROPERTY COMPILE_DEFINITIONS PKCS11_ENGINE_PATH="${PKCS11_ENGINE_PATH}") + endif(PKCS11_ENGINE_PATH) else(BUILD_P11) target_sources(crypto PRIVATE p11engine_dummy.cc) endif(BUILD_P11) - add_aktualizr_test(NAME crypto SOURCES crypto_test.cc PROJECT_WORKING_DIRECTORY) +add_aktualizr_test(NAME hash SOURCES hash_test.cc PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME keymanager SOURCES keymanager_test.cc PROJECT_WORKING_DIRECTORY) +set_property(SOURCE crypto_test.cc keymanager_test.cc PROPERTY COMPILE_DEFINITIONS TEST_PKCS11_MODULE_PATH="${TEST_PKCS11_MODULE_PATH}") + +set_tests_properties(test_crypto test_hash test_keymanager PROPERTIES LABELS "crypto") -aktualizr_source_file_checks(p11engine.cc p11engine_dummy.cc p11_config.h p11engine.h ${TEST_SOURCES}) +aktualizr_source_file_checks(p11engine.cc p11engine_dummy.cc p11engine.h 
${TEST_SOURCES}) diff --git a/src/libaktualizr/crypto/crypto.cc b/src/libaktualizr/crypto/crypto.cc index 33027b447a..d4f05f131d 100644 --- a/src/libaktualizr/crypto/crypto.cc +++ b/src/libaktualizr/crypto/crypto.cc @@ -1,11 +1,19 @@ #include "crypto.h" -#include -#include +#include #include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include "libaktualizr/types.h" #include "logging/logging.h" #include "openssl_compat.h" #include "utilities/utils.h" @@ -14,24 +22,33 @@ PublicKey::PublicKey(const boost::filesystem::path &path) : value_(Utils::readFi type_ = Crypto::IdentifyRSAKeyType(value_); } -PublicKey::PublicKey(Json::Value uptane_json) { - if (!uptane_json["keytype"].isString()) { - type_ = KeyType::kUnknown; - return; - } - if (!uptane_json["keyval"].isObject()) { - type_ = KeyType::kUnknown; - return; - } +PublicKey::PublicKey(const Json::Value &uptane_json) { + std::string keytype; + std::string keyvalue; - if (!uptane_json["keyval"]["public"].isString()) { + try { + if (!uptane_json["keytype"].isString()) { + type_ = KeyType::kUnknown; + return; + } + if (!uptane_json["keyval"].isObject()) { + type_ = KeyType::kUnknown; + return; + } + + if (!uptane_json["keyval"]["public"].isString()) { + type_ = KeyType::kUnknown; + return; + } + + keytype = uptane_json["keytype"].asString(); + keyvalue = uptane_json["keyval"]["public"].asString(); + } catch (const std::exception &ex) { + LOG_ERROR << "Failed to initialize public key: " << ex.what(); type_ = KeyType::kUnknown; return; } - std::string keytype = uptane_json["keytype"].asString(); - std::string keyvalue = uptane_json["keyval"]["public"].asString(); - std::transform(keytype.begin(), keytype.end(), keytype.begin(), ::tolower); KeyType type; @@ -49,10 +66,10 @@ PublicKey::PublicKey(Json::Value uptane_json) { value_ = keyvalue; } -PublicKey::PublicKey(std::string value, KeyType type) : value_(std::move(value)), type_(type) { +PublicKey::PublicKey(const std::string 
&value, KeyType type) : value_(value), type_(type) { if (Crypto::IsRsaKeyType(type)) { if (type != Crypto::IdentifyRSAKeyType(value)) { - std::logic_error("RSA key length is incorrect"); + throw std::logic_error("RSA key length is incorrect"); } } } @@ -94,29 +111,38 @@ Json::Value PublicKey::ToUptane() const { std::string PublicKey::KeyId() const { std::string key_content = value_; + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) boost::algorithm::trim_right_if(key_content, boost::algorithm::is_any_of("\n")); - std::string keyid = boost::algorithm::hex(Crypto::sha256digest(Json::FastWriter().write(Json::Value(key_content)))); + std::string keyid = boost::algorithm::hex(Crypto::sha256digest(Utils::jsonToCanonicalStr(Json::Value(key_content)))); std::transform(keyid.begin(), keyid.end(), keyid.begin(), ::tolower); return keyid; } std::string Crypto::sha256digest(const std::string &text) { - unsigned char sha256_hash[crypto_hash_sha256_BYTES]; - crypto_hash_sha256(sha256_hash, reinterpret_cast(text.c_str()), text.size()); - return std::string(reinterpret_cast(sha256_hash), crypto_hash_sha256_BYTES); + std::array sha256_hash{}; + crypto_hash_sha256(sha256_hash.data(), reinterpret_cast(text.c_str()), text.size()); + return std::string(reinterpret_cast(sha256_hash.data()), crypto_hash_sha256_BYTES); +} + +std::string Crypto::sha256digestHex(const std::string &text) { + return boost::algorithm::to_lower_copy(boost::algorithm::hex(sha256digest(text))); } std::string Crypto::sha512digest(const std::string &text) { - unsigned char sha512_hash[crypto_hash_sha512_BYTES]; - crypto_hash_sha512(sha512_hash, reinterpret_cast(text.c_str()), text.size()); - return std::string(reinterpret_cast(sha512_hash), crypto_hash_sha512_BYTES); + std::array sha512_hash{}; + crypto_hash_sha512(sha512_hash.data(), reinterpret_cast(text.c_str()), text.size()); + return std::string(reinterpret_cast(sha512_hash.data()), crypto_hash_sha512_BYTES); +} + +std::string 
Crypto::sha512digestHex(const std::string &text) { + return boost::algorithm::to_lower_copy(boost::algorithm::hex(sha512digest(text))); } std::string Crypto::RSAPSSSign(ENGINE *engine, const std::string &private_key, const std::string &message) { StructGuard key(nullptr, EVP_PKEY_free); StructGuard rsa(nullptr, RSA_free); if (engine != nullptr) { - // FIXME: this call leaks memory somehow... + // TODO(OTA-2138): this call leaks memory somehow... key.reset(ENGINE_load_private_key(engine, private_key.c_str(), nullptr, nullptr)); if (key == nullptr) { @@ -179,10 +205,10 @@ std::string Crypto::Sign(KeyType key_type, ENGINE *engine, const std::string &pr } std::string Crypto::ED25519Sign(const std::string &private_key, const std::string &message) { - unsigned char sig[crypto_sign_BYTES]; - crypto_sign_detached(sig, nullptr, reinterpret_cast(message.c_str()), message.size(), + std::array sig{}; + crypto_sign_detached(sig.data(), nullptr, reinterpret_cast(message.c_str()), message.size(), reinterpret_cast(private_key.c_str())); - return std::string(reinterpret_cast(sig), crypto_sign_BYTES); + return std::string(reinterpret_cast(sig.data()), crypto_sign_BYTES); } bool Crypto::RSAPSSVerify(const std::string &public_key, const std::string &signature, const std::string &message) { @@ -246,9 +272,7 @@ bool Crypto::parseP12(BIO *p12_bio, const std::string &p12_password, std::string } // use a lambda here because sk_X509_pop_free is a macro - auto stackx509_free = [](STACK_OF(X509) * stack) { - sk_X509_pop_free(stack, X509_free); // NOLINT - }; + auto stackx509_free = [](STACK_OF(X509) * stack) { sk_X509_pop_free(stack, X509_free); }; StructGuard pkey(nullptr, EVP_PKEY_free); StructGuard x509_cert(nullptr, X509_free); @@ -274,7 +298,7 @@ bool Crypto::parseP12(BIO *p12_bio, const std::string &p12_password, std::string PEM_write_bio_PrivateKey(pkey_pem_sink.get(), pkey.get(), nullptr, nullptr, 0, nullptr, nullptr); char *pkey_buf; - auto pkey_len = 
BIO_get_mem_data(pkey_pem_sink.get(), &pkey_buf); // NOLINT + auto pkey_len = BIO_get_mem_data(pkey_pem_sink.get(), &pkey_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) *out_pkey = std::string(pkey_buf, static_cast(pkey_len)); char *cert_buf; @@ -294,35 +318,52 @@ bool Crypto::parseP12(BIO *p12_bio, const std::string &p12_password, std::string return false; } X509 *ca_cert = nullptr; - for (int i = 0; i < sk_X509_num(ca_certs.get()); i++) { // NOLINT - ca_cert = sk_X509_value(ca_certs.get(), i); // NOLINT + for (int i = 0; i < sk_X509_num(ca_certs.get()); i++) { + ca_cert = sk_X509_value(ca_certs.get(), i); PEM_write_bio_X509(ca_sink.get(), ca_cert); PEM_write_bio_X509(cert_sink.get(), ca_cert); } - ca_len = static_cast(BIO_get_mem_data(ca_sink.get(), &ca_buf)); // NOLINT + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) + ca_len = static_cast(BIO_get_mem_data(ca_sink.get(), &ca_buf)); *out_ca = std::string(ca_buf, ca_len); - cert_len = static_cast(BIO_get_mem_data(cert_sink.get(), &cert_buf)); // NOLINT + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) + cert_len = static_cast(BIO_get_mem_data(cert_sink.get(), &cert_buf)); *out_cert = std::string(cert_buf, cert_len); return true; } -bool Crypto::extractSubjectCN(const std::string &cert, std::string *cn) { +std::string Crypto::extractSubjectCN(const std::string &cert) { StructGuard bio(BIO_new_mem_buf(const_cast(cert.c_str()), static_cast(cert.size())), BIO_vfree); StructGuard x(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr), X509_free); if (x == nullptr) { - return false; + throw std::runtime_error("Could not parse certificate"); } int len = X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_commonName, nullptr, 0); if (len < 0) { - return false; + throw std::runtime_error("Could not get CN from certificate"); } boost::scoped_array buf(new char[len + 1]); X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_commonName, buf.get(), len + 1); - *cn = 
std::string(buf.get()); - return true; + return std::string(buf.get()); +} + +std::string Crypto::extractSubjectBC(const std::string &cert) { + StructGuard bio(BIO_new_mem_buf(const_cast(cert.c_str()), static_cast(cert.size())), BIO_vfree); + StructGuard x(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr), X509_free); + if (x == nullptr) { + throw std::runtime_error("Could not parse certificate"); + } + + int len = X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_businessCategory, nullptr, 0); + if (len < 0) { + return ""; + } + boost::scoped_array buf(new char[len + 1]); + X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_businessCategory, buf.get(), len + 1); + return std::string(buf.get()); } StructGuard Crypto::generateRSAKeyPairEVP(KeyType key_type) { @@ -341,31 +382,42 @@ StructGuard Crypto::generateRSAKeyPairEVP(KeyType key_type) { return {nullptr, EVP_PKEY_free}; } -#if AKTUALIZR_OPENSSL_PRE_11 - StructGuard rsa(RSA_generate_key(bits, /* number of bits for the key - 2048 is a sensible value */ - RSA_F4, /* exponent - RSA_F4 is defined as 0x10001L */ - nullptr, /* callback - can be NULL if we aren't displaying progress */ - nullptr), /* callback argument - not needed in this case */ - RSA_free); -#else - int ret; + return Crypto::generateRSAKeyPairEVP(bits); +} + +StructGuard Crypto::generateRSAKeyPairEVP(const int bits) { + if (bits < 31) { // sic! 
+ throw std::runtime_error("RSA key size can't be smaller than 31 bits"); + } + + int ret = RAND_status(); + if (ret != 1) { /* random generator has NOT been seeded with enough data */ + ret = RAND_poll(); + if (ret != 1) { /* seed data was NOT generated */ + throw std::runtime_error("Random generator has not been sufficiently seeded."); + } + } + + /* exponent - RSA_F4 is defined as 0x10001L */ StructGuard bne(BN_new(), BN_free); - ret = BN_set_word(bne.get(), RSA_F4); - if (ret != 1) { - return {nullptr, EVP_PKEY_free}; + if (BN_set_word(bne.get(), RSA_F4) != 1) { + throw std::runtime_error(std::string("BN_set_word failed: ") + ERR_error_string(ERR_get_error(), nullptr)); } + StructGuard rsa(RSA_new(), RSA_free); - ret = RSA_generate_key_ex(rsa.get(), bits, /* number of bits for the key - 2048 is a sensible value */ - bne.get(), /* exponent - RSA_F4 is defined as 0x10001L */ - nullptr); /* callback argument - not needed in this case */ - if (ret != 1) { - return {nullptr, EVP_PKEY_free}; + if (RSA_generate_key_ex(rsa.get(), bits, bne.get(), nullptr) != 1) { + throw std::runtime_error(std::string("RSA_generate_key_ex failed: ") + ERR_error_string(ERR_get_error(), nullptr)); } -#endif StructGuard pkey(EVP_PKEY_new(), EVP_PKEY_free); + if (pkey.get() == nullptr) { + throw std::runtime_error(std::string("EVP_PKEY_new failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + // release the rsa pointer here, pkey is the new owner - EVP_PKEY_assign_RSA(pkey.get(), rsa.release()); // NOLINT + if (!EVP_PKEY_assign_RSA(pkey.get(), rsa.release())) { // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) + throw std::runtime_error(std::string("EVP_PKEY_assign_RSA failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } return pkey; } @@ -380,6 +432,9 @@ StructGuard Crypto::generateRSAKeyPairEVP(KeyType key_type) { bool Crypto::generateRSAKeyPair(KeyType key_type, std::string *public_key, std::string *private_key) { int ret = 0; StructGuard pkey = 
generateRSAKeyPairEVP(key_type); + if (pkey == nullptr) { + return false; + } char *pubkey_buf; StructGuard pubkey_sink(BIO_new(BIO_s_mem()), BIO_vfree); @@ -390,7 +445,7 @@ bool Crypto::generateRSAKeyPair(KeyType key_type, std::string *public_key, std:: if (ret != 1) { return false; } - auto pubkey_len = BIO_get_mem_data(pubkey_sink.get(), &pubkey_buf); // NOLINT + auto pubkey_len = BIO_get_mem_data(pubkey_sink.get(), &pubkey_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) *public_key = std::string(pubkey_buf, static_cast(pubkey_len)); char *privkey_buf; @@ -404,18 +459,19 @@ bool Crypto::generateRSAKeyPair(KeyType key_type, std::string *public_key, std:: if (ret != 1) { return false; } - auto privkey_len = BIO_get_mem_data(privkey_sink.get(), &privkey_buf); // NOLINT + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) + auto privkey_len = BIO_get_mem_data(privkey_sink.get(), &privkey_buf); *private_key = std::string(privkey_buf, static_cast(privkey_len)); return true; } bool Crypto::generateEDKeyPair(std::string *public_key, std::string *private_key) { - unsigned char pk[crypto_sign_PUBLICKEYBYTES]; - unsigned char sk[crypto_sign_SECRETKEYBYTES]; - crypto_sign_keypair(pk, sk); - *public_key = boost::algorithm::hex(std::string(reinterpret_cast(pk), crypto_sign_PUBLICKEYBYTES)); + std::array pk{}; + std::array sk{}; + crypto_sign_keypair(pk.data(), sk.data()); + *public_key = boost::algorithm::hex(std::string(reinterpret_cast(pk.data()), crypto_sign_PUBLICKEYBYTES)); // std::transform(public_key->begin(), public_key->end(), public_key->begin(), ::tolower); - *private_key = boost::algorithm::hex(std::string(reinterpret_cast(sk), crypto_sign_SECRETKEYBYTES)); + *private_key = boost::algorithm::hex(std::string(reinterpret_cast(sk.data()), crypto_sign_SECRETKEYBYTES)); // std::transform(private_key->begin(), private_key->end(), private_key->begin(), ::tolower); return true; } @@ -437,6 +493,7 @@ bool Crypto::IsRsaKeyType(KeyType type) { return false; } 
} + KeyType Crypto::IdentifyRSAKeyType(const std::string &public_key_pem) { StructGuard bufio(BIO_new_mem_buf(reinterpret_cast(public_key_pem.c_str()), static_cast(public_key_pem.length())), @@ -467,3 +524,254 @@ KeyType Crypto::IdentifyRSAKeyType(const std::string &public_key_pem) { return KeyType::kUnknown; } } + +StructGuard Crypto::generateCert(const int rsa_bits, const int cert_days, const std::string &cert_c, + const std::string &cert_st, const std::string &cert_o, + const std::string &cert_cn, bool self_sign) { + // create certificate + StructGuard certificate(X509_new(), X509_free); + if (certificate.get() == nullptr) { + throw std::runtime_error(std::string("X509_new failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + X509_set_version(certificate.get(), 2); // X509v3 + + { + std::random_device urandom; + std::uniform_int_distribution<> serial_dist(0, (1UL << 20) - 1); + ASN1_INTEGER_set(X509_get_serialNumber(certificate.get()), serial_dist(urandom)); + } + + // create and set certificate subject name + StructGuard subj(X509_NAME_new(), X509_NAME_free); + if (subj.get() == nullptr) { + throw std::runtime_error(std::string("X509_NAME_new failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + if (!cert_c.empty()) { + if (X509_NAME_add_entry_by_txt(subj.get(), "C", MBSTRING_ASC, + reinterpret_cast(cert_c.c_str()), -1, -1, 0) == 0) { + throw std::runtime_error(std::string("X509_NAME_add_entry_by_txt failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + } + + if (!cert_st.empty()) { + if (X509_NAME_add_entry_by_txt(subj.get(), "ST", MBSTRING_ASC, + reinterpret_cast(cert_st.c_str()), -1, -1, 0) == 0) { + throw std::runtime_error(std::string("X509_NAME_add_entry_by_txt failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + } + + if (!cert_o.empty()) { + if (X509_NAME_add_entry_by_txt(subj.get(), "O", MBSTRING_ASC, + reinterpret_cast(cert_o.c_str()), -1, -1, 0) == 0) { + throw 
std::runtime_error(std::string("X509_NAME_add_entry_by_txt failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + } + + assert(!cert_cn.empty()); + if (X509_NAME_add_entry_by_txt(subj.get(), "CN", MBSTRING_ASC, + reinterpret_cast(cert_cn.c_str()), -1, -1, 0) == 0) { + throw std::runtime_error(std::string("X509_NAME_add_entry_by_txt failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + + if (X509_set_subject_name(certificate.get(), subj.get()) == 0) { + throw std::runtime_error(std::string("X509_set_subject_name failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + + // create and set key. + StructGuard certificate_pkey(Crypto::generateRSAKeyPairEVP(rsa_bits)); + + if (X509_set_pubkey(certificate.get(), certificate_pkey.get()) == 0) { + throw std::runtime_error(std::string("X509_set_pubkey failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + // set validity period + if (X509_gmtime_adj(X509_get_notBefore(certificate.get()), 0) == nullptr) { + throw std::runtime_error(std::string("X509_gmtime_adj failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + if (X509_gmtime_adj(X509_get_notAfter(certificate.get()), 60L * 60L * 24L * cert_days) == nullptr) { + throw std::runtime_error(std::string("X509_gmtime_adj failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + // self-sign + if (self_sign) { + const EVP_MD *cert_digest = EVP_sha256(); + if (X509_sign(certificate.get(), certificate_pkey.get(), cert_digest) == 0) { + throw std::runtime_error(std::string("X509_sign failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + LOG_INFO << "Successfully self-signed the generated certificate. 
This should not be used in production!"; + } + + return certificate; +} + +void Crypto::signCert(const std::string &cacert_path, const std::string &capkey_path, X509 *const certificate) { + // read CA certificate + std::string cacert_contents = Utils::readFile(cacert_path); + StructGuard bio_in_cacert(BIO_new_mem_buf(cacert_contents.c_str(), static_cast(cacert_contents.size())), + BIO_free_all); + StructGuard ca_certificate(PEM_read_bio_X509(bio_in_cacert.get(), nullptr, nullptr, nullptr), X509_free); + if (ca_certificate.get() == nullptr) { + throw std::runtime_error(std::string("Reading CA certificate failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + + // read CA private key + std::string capkey_contents = Utils::readFile(capkey_path); + StructGuard bio_in_capkey(BIO_new_mem_buf(capkey_contents.c_str(), static_cast(capkey_contents.size())), + BIO_free_all); + StructGuard ca_privkey(PEM_read_bio_PrivateKey(bio_in_capkey.get(), nullptr, nullptr, nullptr), + EVP_PKEY_free); + if (ca_privkey.get() == nullptr) { + throw std::runtime_error(std::string("PEM_read_bio_PrivateKey failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + + // set issuer name + X509_NAME *ca_subj = X509_get_subject_name(ca_certificate.get()); + if (ca_subj == nullptr) { + throw std::runtime_error(std::string("X509_get_subject_name failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + + if (X509_set_issuer_name(certificate, ca_subj) == 0) { + throw std::runtime_error(std::string("X509_set_issuer_name failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + // sign + const EVP_MD *cert_digest = EVP_sha256(); + if (X509_sign(certificate, ca_privkey.get(), cert_digest) == 0) { + throw std::runtime_error(std::string("X509_sign failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } +} + +void Crypto::serializeCert(std::string *pkey, std::string *cert, X509 *const certificate) { + // serialize private key + char *privkey_buf; + StructGuard 
privkey_file(BIO_new(BIO_s_mem()), BIO_vfree); + if (privkey_file == nullptr) { + throw std::runtime_error(std::string("BIO_new failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + StructGuard certificate_pkey(X509_get_pubkey(certificate), EVP_PKEY_free); + if (certificate_pkey == nullptr) { + throw std::runtime_error(std::string("X509_get_pubkey failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + StructGuard certificate_rsa(EVP_PKEY_get1_RSA(certificate_pkey.get()), RSA_free); + if (certificate_rsa == nullptr) { + throw std::runtime_error(std::string("EVP_PKEY_get1_RSA failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + + int ret = + PEM_write_bio_RSAPrivateKey(privkey_file.get(), certificate_rsa.get(), nullptr, nullptr, 0, nullptr, nullptr); + if (ret == 0) { + throw std::runtime_error(std::string("PEM_write_RSAPrivateKey failed: ") + + ERR_error_string(ERR_get_error(), nullptr)); + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) + auto privkey_len = BIO_get_mem_data(privkey_file.get(), &privkey_buf); + *pkey = std::string(privkey_buf, static_cast(privkey_len)); + + // serialize certificate + char *cert_buf; + StructGuard cert_file(BIO_new(BIO_s_mem()), BIO_vfree); + if (cert_file == nullptr) { + throw std::runtime_error(std::string("BIO_new failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + ret = PEM_write_bio_X509(cert_file.get(), certificate); + if (ret == 0) { + throw std::runtime_error(std::string("PEM_write_bio_X509 failed: ") + ERR_error_string(ERR_get_error(), nullptr)); + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) + auto cert_len = BIO_get_mem_data(cert_file.get(), &cert_buf); + *cert = std::string(cert_buf, static_cast(cert_len)); +} + +MultiPartHasher::Ptr MultiPartHasher::create(Hash::Type hash_type) { + switch (hash_type) { + case Hash::Type::kSha256: { + return std::make_shared(); + } + case Hash::Type::kSha512: { + return std::make_shared(); + } + default: { + 
LOG_ERROR << "Unsupported type of hashing: " << Hash::TypeString(hash_type); + return nullptr; + } + } +} + +std::string MultiPartSHA512Hasher::getHexDigest() { + std::array sha512_hash{}; + crypto_hash_sha512_final(&state_, sha512_hash.data()); + return boost::algorithm::hex(std::string(reinterpret_cast(sha512_hash.data()), crypto_hash_sha512_BYTES)); +} + +std::string MultiPartSHA256Hasher::getHexDigest() { + std::array sha256_hash{}; + crypto_hash_sha256_final(&state_, sha256_hash.data()); + return boost::algorithm::hex(std::string(reinterpret_cast(sha256_hash.data()), crypto_hash_sha256_BYTES)); +} + +Hash Hash::generate(Type type, const std::string &data) { + std::string hash; + + switch (type) { + case Type::kSha256: { + hash = boost::algorithm::hex(Crypto::sha256digest(data)); + break; + } + case Type::kSha512: { + hash = boost::algorithm::hex(Crypto::sha512digest(data)); + break; + } + default: { + throw std::invalid_argument("Unsupported hash type"); + } + } + + return Hash(type, hash); +} + +Hash::Hash(const std::string &type, const std::string &hash) : hash_(boost::algorithm::to_upper_copy(hash)) { + if (type == "sha512") { + type_ = Hash::Type::kSha512; + } else if (type == "sha256") { + type_ = Hash::Type::kSha256; + } else { + type_ = Hash::Type::kUnknownAlgorithm; + } +} + +Hash::Hash(Type type, const std::string &hash) : type_(type), hash_(boost::algorithm::to_upper_copy(hash)) {} + +bool Hash::operator==(const Hash &other) const { return type_ == other.type_ && hash_ == other.hash_; } + +std::string Hash::TypeString(Type type) { + switch (type) { + case Type::kSha256: + return "sha256"; + case Type::kSha512: + return "sha512"; + default: + return "unknown"; + } +} + +std::string Hash::TypeString() const { return TypeString(type_); } + +Hash::Type Hash::type() const { return type_; } + +std::ostream &operator<<(std::ostream &os, const Hash &h) { + os << "Hash: " << h.hash_; + return os; +} diff --git a/src/libaktualizr/crypto/crypto.h 
b/src/libaktualizr/crypto/crypto.h index a3da4ebc78..6591c6224b 100644 --- a/src/libaktualizr/crypto/crypto.h +++ b/src/libaktualizr/crypto/crypto.h @@ -1,82 +1,59 @@ #ifndef CRYPTO_H_ #define CRYPTO_H_ +#include // for X509, BIO, ENGINE, EVP_PKEY +#include // for crypto_hash_sha256_init, cryp... +#include // for crypto_hash_sha512_init, cryp... + +#include // for copy +#include // for array +#include // for uint64_t +#include // for shared_ptr +#include // for string + +#include "libaktualizr/types.h" // for Hash, KeyType, Hash::Type +#include "utilities/utils.h" // for StructGuard + #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "utilities/types.h" -#include "utilities/utils.h" -// some older versions of openssl have BIO_new_mem_buf defined with fisrt parameter of type (void*) +// some older versions of openssl have BIO_new_mem_buf defined with first parameter of type (void*) // which is not true and breaks our build #undef BIO_new_mem_buf -BIO *BIO_new_mem_buf(const void *, int); +BIO *BIO_new_mem_buf(const void *, int); // NOLINT(readability-redundant-declaration) -class PublicKey { +class MultiPartHasher { public: - PublicKey() = default; - explicit PublicKey(const boost::filesystem::path &path); - - explicit PublicKey(Json::Value uptane_json); - - PublicKey(std::string value, KeyType type); - - std::string Value() const { return value_; } - - KeyType Type() const { return type_; } - /** - * Verify a signature using this public key - */ - bool VerifySignature(const std::string &signature, const std::string &message) const; - /** - * Uptane Json representation of this public key. Used in root.json - * and during provisioning. 
- */ - Json::Value ToUptane() const; - - std::string KeyId() const; - bool operator==(const PublicKey &rhs) const; - - bool operator!=(const PublicKey &rhs) const { return !(*this == rhs); } + MultiPartHasher() = default; + virtual ~MultiPartHasher() = default; + MultiPartHasher(const MultiPartHasher &) = delete; + MultiPartHasher(MultiPartHasher &&) = delete; + MultiPartHasher &operator=(const MultiPartHasher &) = delete; + MultiPartHasher &operator=(MultiPartHasher &&) = delete; - private: - // std::string can be implicitly converted to a Json::Value. Make sure that - // the Json::Value constructor is not called accidentally. - PublicKey(std::string); - std::string value_; - KeyType type_{KeyType::kUnknown}; -}; + using Ptr = std::shared_ptr; + static Ptr create(Hash::Type hash_type); -class MultiPartHasher { - public: virtual void update(const unsigned char *part, uint64_t size) = 0; + virtual void reset() = 0; virtual std::string getHexDigest() = 0; - virtual ~MultiPartHasher() = default; + virtual Hash getHash() = 0; }; class MultiPartSHA512Hasher : public MultiPartHasher { public: MultiPartSHA512Hasher() { crypto_hash_sha512_init(&state_); } ~MultiPartSHA512Hasher() override = default; + MultiPartSHA512Hasher(const MultiPartSHA512Hasher &) = delete; + MultiPartSHA512Hasher(MultiPartSHA512Hasher &&) = delete; + MultiPartSHA512Hasher &operator=(const MultiPartSHA512Hasher &) = delete; + MultiPartSHA512Hasher &operator=(MultiPartSHA512Hasher &&) = delete; void update(const unsigned char *part, uint64_t size) override { crypto_hash_sha512_update(&state_, part, size); } - std::string getHexDigest() override { - unsigned char sha512_hash[crypto_hash_sha512_BYTES]; - crypto_hash_sha512_final(&state_, static_cast(sha512_hash)); - return boost::algorithm::hex(std::string(reinterpret_cast(sha512_hash), crypto_hash_sha512_BYTES)); - } + void reset() override { crypto_hash_sha512_init(&state_); } + std::string getHexDigest() override; + Hash getHash() override { return 
Hash(Hash::Type::kSha512, getHexDigest()); } private: crypto_hash_sha512_state state_{}; @@ -86,12 +63,15 @@ class MultiPartSHA256Hasher : public MultiPartHasher { public: MultiPartSHA256Hasher() { crypto_hash_sha256_init(&state_); } ~MultiPartSHA256Hasher() override = default; + MultiPartSHA256Hasher(const MultiPartSHA256Hasher &) = delete; + MultiPartSHA256Hasher(MultiPartSHA256Hasher &&) = delete; + MultiPartSHA256Hasher &operator=(const MultiPartSHA256Hasher &) = delete; + MultiPartSHA256Hasher &operator=(MultiPartSHA256Hasher &&) = delete; void update(const unsigned char *part, uint64_t size) override { crypto_hash_sha256_update(&state_, part, size); } - std::string getHexDigest() override { - unsigned char sha256_hash[crypto_hash_sha256_BYTES]; - crypto_hash_sha256_final(&state_, static_cast(sha256_hash)); - return boost::algorithm::hex(std::string(reinterpret_cast(sha256_hash), crypto_hash_sha256_BYTES)); - } + void reset() override { crypto_hash_sha256_init(&state_); } + std::string getHexDigest() override; + + Hash getHash() override { return Hash(Hash::Type::kSha256, getHexDigest()); } private: crypto_hash_sha256_state state_{}; @@ -100,14 +80,21 @@ class MultiPartSHA256Hasher : public MultiPartHasher { class Crypto { public: static std::string sha256digest(const std::string &text); + /** A lower case, hexadecimal version of sha256digest */ + static std::string sha256digestHex(const std::string &text); static std::string sha512digest(const std::string &text); + /** A lower case, hexadecimal version of sha512digest */ + static std::string sha512digestHex(const std::string &text); static std::string RSAPSSSign(ENGINE *engine, const std::string &private_key, const std::string &message); static std::string Sign(KeyType key_type, ENGINE *engine, const std::string &private_key, const std::string &message); static std::string ED25519Sign(const std::string &private_key, const std::string &message); static bool parseP12(BIO *p12_bio, const std::string 
&p12_password, std::string *out_pkey, std::string *out_cert, std::string *out_ca); - static bool extractSubjectCN(const std::string &cert, std::string *cn); + static std::string extractSubjectCN(const std::string &cert); + /* extract business category */ + static std::string extractSubjectBC(const std::string &cert); static StructGuard generateRSAKeyPairEVP(KeyType key_type); + static StructGuard generateRSAKeyPairEVP(int bits); static bool generateRSAKeyPair(KeyType key_type, std::string *public_key, std::string *private_key); static bool generateEDKeyPair(std::string *public_key, std::string *private_key); static bool generateKeyPair(KeyType key_type, std::string *public_key, std::string *private_key); @@ -117,6 +104,12 @@ class Crypto { static bool IsRsaKeyType(KeyType type); static KeyType IdentifyRSAKeyType(const std::string &public_key_pem); + + static StructGuard generateCert(int rsa_bits, int cert_days, const std::string &cert_c, + const std::string &cert_st, const std::string &cert_o, + const std::string &cert_cn, bool self_sign = false); + static void signCert(const std::string &cacert_path, const std::string &capkey_path, X509 *certificate); + static void serializeCert(std::string *pkey, std::string *cert, X509 *certificate); }; #endif // CRYPTO_H_ diff --git a/src/libaktualizr/crypto/crypto_test.cc b/src/libaktualizr/crypto/crypto_test.cc index 815e0146a7..f907a707ad 100644 --- a/src/libaktualizr/crypto/crypto_test.cc +++ b/src/libaktualizr/crypto/crypto_test.cc @@ -50,6 +50,24 @@ TEST(crypto, sign_verify_rsa_file) { } #ifdef BUILD_P11 + +class P11Crypto : public ::testing::Test { + protected: + static void SetUpTestSuite() { p11_ = std::make_shared(module_path_, pass_, label_); } + + static void TearDownTestSuite() { p11_.reset(); } + + static boost::filesystem::path module_path_; + static std::string pass_; + static std::string label_; + static std::shared_ptr p11_; +}; + +boost::filesystem::path P11Crypto::module_path_{TEST_PKCS11_MODULE_PATH}; 
+std::string P11Crypto::pass_{"1234"}; +std::string P11Crypto::label_{"Virtual token"}; +std::shared_ptr P11Crypto::p11_{nullptr}; + TEST(crypto, findPkcsLibrary) { const boost::filesystem::path pkcs11Path = P11Engine::findPkcsLibrary(); EXPECT_NE(pkcs11Path, ""); @@ -57,52 +75,39 @@ TEST(crypto, findPkcsLibrary) { } /* Sign and verify a file with RSA via PKCS#11. */ -TEST(crypto, sign_verify_rsa_p11) { - P11Config config; - config.module = TEST_PKCS11_MODULE_PATH; - config.pass = "1234"; - config.uptane_key_id = "03"; +TEST_F(P11Crypto, sign_verify_rsa_p11) { + const std::string uptane_key_id{"03"}; - P11EngineGuard p11(config); std::string text = "This is text for sign"; std::string key_content; - EXPECT_TRUE(p11->readUptanePublicKey(&key_content)); + EXPECT_TRUE((*p11_)->readUptanePublicKey(uptane_key_id, &key_content)); PublicKey pkey(key_content, KeyType::kRSA2048); - std::string private_key = p11->getUptaneKeyId(); - std::string signature = Utils::toBase64(Crypto::RSAPSSSign(p11->getEngine(), private_key, text)); + std::string private_key = (*p11_)->getItemFullId(uptane_key_id); + std::string signature = Utils::toBase64(Crypto::RSAPSSSign((*p11_)->getEngine(), private_key, text)); bool signe_is_ok = pkey.VerifySignature(signature, text); EXPECT_TRUE(signe_is_ok); } /* Generate RSA keypairs via PKCS#11. 
*/ -TEST(crypto, generate_rsa_keypair_p11) { - P11Config config; - config.module = TEST_PKCS11_MODULE_PATH; - config.pass = "1234"; - config.uptane_key_id = "05"; +TEST_F(P11Crypto, generate_rsa_keypair_p11) { + const std::string uptane_key_id{"05"}; - P11EngineGuard p11(config); std::string key_content; - EXPECT_FALSE(p11->readUptanePublicKey(&key_content)); - EXPECT_TRUE(p11->generateUptaneKeyPair()); - EXPECT_TRUE(p11->readUptanePublicKey(&key_content)); + EXPECT_FALSE((*p11_)->readUptanePublicKey(uptane_key_id, &key_content)); + EXPECT_TRUE((*p11_)->generateUptaneKeyPair(uptane_key_id)); + EXPECT_TRUE((*p11_)->readUptanePublicKey(uptane_key_id, &key_content)); } /* Read a TLS certificate via PKCS#11. */ -TEST(crypto, certificate_pkcs11) { - P11Config p11_conf; - p11_conf.module = TEST_PKCS11_MODULE_PATH; - p11_conf.pass = "1234"; - p11_conf.tls_clientcert_id = "01"; - P11EngineGuard p11(p11_conf); +TEST_F(P11Crypto, certificate_pkcs11) { + const std::string tls_clientcert_id{"01"}; std::string cert; - bool res = p11->readTlsCert(&cert); + bool res = (*p11_)->readTlsCert(tls_clientcert_id, &cert); EXPECT_TRUE(res); if (!res) return; - std::string device_name; - EXPECT_TRUE(Crypto::extractSubjectCN(cert, &device_name)); + const std::string device_name = Crypto::extractSubjectCN(cert); EXPECT_EQ(device_name, "cc34f7f3-481d-443b-bceb-e838a36a2d1f"); } #endif @@ -137,12 +142,12 @@ TEST(crypto, verify_ed25519) { root_stream.close(); std::string signature = "lS1GII6MS2FAPuSzBPHOZbE0wLIRpFhlbaCSgNOJLT1h+69OjaN/YQq16uzoXX3rev/Dhw0Raa4v9xocE8GmBA=="; PublicKey pkey("cb07563157805c279ec90ccb057f2c3ea6e89200e1e67f8ae66185987ded9b1c", KeyType::kED25519); - bool signe_is_ok = pkey.VerifySignature(signature, Json::FastWriter().write(Utils::parseJSON(text))); + bool signe_is_ok = pkey.VerifySignature(signature, Utils::jsonToCanonicalStr(Utils::parseJSON(text))); EXPECT_TRUE(signe_is_ok); std::string signature_bad = 
"33lS1GII6MS2FAPuSzBPHOZbE0wLIRpFhlbaCSgNOJLT1h+69OjaN/YQq16uzoXX3rev/Dhw0Raa4v9xocE8GmBA=="; - signe_is_ok = pkey.VerifySignature(signature_bad, Json::FastWriter().write(Utils::parseJSON(text))); + signe_is_ok = pkey.VerifySignature(signature_bad, Utils::jsonToCanonicalStr(Utils::parseJSON(text))); EXPECT_FALSE(signe_is_ok); } diff --git a/src/libaktualizr/crypto/hash_test.cc b/src/libaktualizr/crypto/hash_test.cc new file mode 100644 index 0000000000..cce0beb1aa --- /dev/null +++ b/src/libaktualizr/crypto/hash_test.cc @@ -0,0 +1,35 @@ +#include + +#include "crypto.h" +#include "logging/logging.h" + +TEST(Hash, EncodeDecode) { + std::vector hashes = {{Hash::Type::kSha256, "abcd"}, {Hash::Type::kSha512, "defg"}}; + + std::string encoded = Hash::encodeVector(hashes); + std::vector decoded = Hash::decodeVector(encoded); + + EXPECT_EQ(hashes, decoded); +} + +TEST(Hash, DecodeBad) { + std::string bad1 = ":"; + EXPECT_EQ(Hash::decodeVector(bad1), std::vector{}); + + std::string bad2 = ":abcd;sha256:12"; + EXPECT_EQ(Hash::decodeVector(bad2), std::vector{Hash(Hash::Type::kSha256, "12")}); + + std::string bad3 = "sha256;"; + EXPECT_EQ(Hash::decodeVector(bad3), std::vector{}); + + std::string bad4 = "sha256:;"; + EXPECT_EQ(Hash::decodeVector(bad4), std::vector{}); +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + logger_set_threshold(boost::log::trivial::trace); + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/libaktualizr/crypto/keymanager.cc b/src/libaktualizr/crypto/keymanager.cc index d6a0e854db..a7e46b402d 100644 --- a/src/libaktualizr/crypto/keymanager.cc +++ b/src/libaktualizr/crypto/keymanager.cc @@ -1,14 +1,17 @@ #include "keymanager.h" -#include "utilities/types.h" #include +#include +#include #include -#include -#if defined(ANDROID) -#include "androidkeystore.h" -#endif +#include "crypto/crypto.h" +#include "crypto/openssl_compat.h" +#include "http/httpinterface.h" +#include "libaktualizr/types.h" 
+#include "p11engine.h" +#include "storage/invstorage.h" // by using constexpr the compiler can optimize out method calls when the // feature is disabled. We won't then need to link with the actual p11 engine @@ -19,16 +22,21 @@ static constexpr bool built_with_p11 = true; static constexpr bool built_with_p11 = false; #endif -KeyManager::KeyManager(std::shared_ptr backend, KeyManagerConfig config) +KeyManager::KeyManager(std::shared_ptr backend, KeyManagerConfig config, + const std::shared_ptr &p11) : backend_(std::move(backend)), config_(std::move(config)) { if (built_with_p11) { - p11_ = std_::make_unique(config_.p11); + if (!p11) { + p11_ = std::make_shared(config_.p11.module, config_.p11.pass, config_.p11.label); + } else { + p11_ = p11; + } } } void KeyManager::loadKeys(const std::string *pkey_content, const std::string *cert_content, const std::string *ca_content) { - if (config_.tls_pkey_source == CryptoSource::kFile || config_.tls_pkey_source == CryptoSource::kAndroid) { + if (config_.tls_pkey_source == CryptoSource::kFile) { std::string pkey; if (pkey_content != nullptr) { pkey = *pkey_content; @@ -42,7 +50,7 @@ void KeyManager::loadKeys(const std::string *pkey_content, const std::string *ce tmp_pkey_file->PutContents(pkey); } } - if (config_.tls_cert_source == CryptoSource::kFile || config_.tls_cert_source == CryptoSource::kAndroid) { + if (config_.tls_cert_source == CryptoSource::kFile) { std::string cert; if (cert_content != nullptr) { cert = *cert_content; @@ -56,7 +64,7 @@ void KeyManager::loadKeys(const std::string *pkey_content, const std::string *ce tmp_cert_file->PutContents(cert); } } - if (config_.tls_ca_source == CryptoSource::kFile || config_.tls_ca_source == CryptoSource::kAndroid) { + if (config_.tls_ca_source == CryptoSource::kFile) { std::string ca; if (ca_content != nullptr) { ca = *ca_content; @@ -78,9 +86,9 @@ std::string KeyManager::getPkeyFile() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without 
PKCS#11"); } - pkey_file = (*p11_)->getTlsPkeyId(); + pkey_file = (*p11_)->getItemFullId(config_.p11.tls_pkey_id); } - if (config_.tls_pkey_source == CryptoSource::kFile || config_.tls_pkey_source == CryptoSource::kAndroid) { + if (config_.tls_pkey_source == CryptoSource::kFile) { if (tmp_pkey_file && !boost::filesystem::is_empty(tmp_pkey_file->PathString())) { pkey_file = tmp_pkey_file->PathString(); } @@ -94,9 +102,9 @@ std::string KeyManager::getCertFile() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11"); } - cert_file = (*p11_)->getTlsCertId(); + cert_file = (*p11_)->getItemFullId(config_.p11.tls_clientcert_id); } - if (config_.tls_cert_source == CryptoSource::kFile || config_.tls_cert_source == CryptoSource::kAndroid) { + if (config_.tls_cert_source == CryptoSource::kFile) { if (tmp_cert_file && !boost::filesystem::is_empty(tmp_cert_file->PathString())) { cert_file = tmp_cert_file->PathString(); } @@ -110,9 +118,9 @@ std::string KeyManager::getCaFile() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11"); } - ca_file = (*p11_)->getTlsCacertId(); + ca_file = (*p11_)->getItemFullId(config_.p11.tls_cacert_id); } - if (config_.tls_ca_source == CryptoSource::kFile || config_.tls_ca_source == CryptoSource::kAndroid) { + if (config_.tls_ca_source == CryptoSource::kFile) { if (tmp_ca_file && !boost::filesystem::is_empty(tmp_ca_file->PathString())) { ca_file = tmp_ca_file->PathString(); } @@ -126,9 +134,9 @@ std::string KeyManager::getPkey() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11"); } - pkey = (*p11_)->getTlsPkeyId(); + pkey = (*p11_)->getItemFullId(config_.p11.tls_pkey_id); } - if (config_.tls_pkey_source == CryptoSource::kFile || config_.tls_pkey_source == CryptoSource::kAndroid) { + if (config_.tls_pkey_source == CryptoSource::kFile) { backend_->loadTlsPkey(&pkey); } return pkey; @@ -140,9 +148,9 @@ std::string 
KeyManager::getCert() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11"); } - cert = (*p11_)->getTlsCertId(); + cert = (*p11_)->getItemFullId(config_.p11.tls_clientcert_id); } - if (config_.tls_cert_source == CryptoSource::kFile || config_.tls_cert_source == CryptoSource::kAndroid) { + if (config_.tls_cert_source == CryptoSource::kFile) { backend_->loadTlsCert(&cert); } return cert; @@ -154,18 +162,37 @@ std::string KeyManager::getCa() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11"); } - ca = (*p11_)->getTlsCacertId(); + ca = (*p11_)->getItemFullId(config_.p11.tls_cacert_id); } - if (config_.tls_ca_source == CryptoSource::kFile || config_.tls_ca_source == CryptoSource::kAndroid) { + if (config_.tls_ca_source == CryptoSource::kFile) { backend_->loadTlsCa(&ca); } return ca; } std::string KeyManager::getCN() const { - std::string not_found_cert_message = "Certificate is not found, can't extract device_id"; + const std::string not_found_cert_message = "Certificate is not found, can't extract device_id"; + std::string cert; + if (config_.tls_cert_source == CryptoSource::kFile) { + if (!backend_->loadTlsCert(&cert)) { + throw std::runtime_error(not_found_cert_message); + } + } else { // CryptoSource::kPkcs11 + if (!built_with_p11) { + throw std::runtime_error("Aktualizr was built without PKCS#11 support, can't extract device_id"); + } + if (!(*p11_)->readTlsCert(config_.p11.tls_clientcert_id, &cert)) { + throw std::runtime_error(not_found_cert_message); + } + } + + return Crypto::extractSubjectCN(cert); +} + +std::string KeyManager::getBC() const { + const std::string not_found_cert_message = "Certificate is not found, can't extract device_id"; std::string cert; - if (config_.tls_cert_source == CryptoSource::kFile || config_.tls_cert_source == CryptoSource::kAndroid) { + if (config_.tls_cert_source == CryptoSource::kFile) { if (!backend_->loadTlsCert(&cert)) { throw 
std::runtime_error(not_found_cert_message); } @@ -173,7 +200,26 @@ std::string KeyManager::getCN() const { if (!built_with_p11) { throw std::runtime_error("Aktualizr was built without PKCS#11 support, can't extract device_id"); } - if (!(*p11_)->readTlsCert(&cert)) { + if (!(*p11_)->readTlsCert(config_.p11.tls_clientcert_id, &cert)) { + throw std::runtime_error(not_found_cert_message); + } + } + return Crypto::extractSubjectBC(cert); +} + +void KeyManager::getCertInfo(std::string *subject, std::string *issuer, std::string *not_before, + std::string *not_after) const { + std::string not_found_cert_message = "Certificate is not found, can't extract device certificate"; + std::string cert; + if (config_.tls_cert_source == CryptoSource::kFile) { + if (!backend_->loadTlsCert(&cert)) { + throw std::runtime_error(not_found_cert_message); + } + } else { // CryptoSource::kPkcs11 + if (!built_with_p11) { + throw std::runtime_error("Aktualizr was built without PKCS#11 support, can't extract device certificate"); + } + if (!(*p11_)->readTlsCert(config_.p11.tls_clientcert_id, &cert)) { throw std::runtime_error(not_found_cert_message); } } @@ -184,22 +230,53 @@ std::string KeyManager::getCN() const { throw std::runtime_error("Could not parse certificate"); } - int len = X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_commonName, nullptr, 0); - if (len < 0) { - throw std::runtime_error("Could not get CN from certificate"); + StructGuard subj_bio(BIO_new(BIO_s_mem()), BIO_vfree); + X509_NAME_print_ex(subj_bio.get(), X509_get_subject_name(x.get()), 1, 0); + char *subj_buf = nullptr; + auto subj_len = BIO_get_mem_data(subj_bio.get(), &subj_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) + if (subj_buf == nullptr) { + throw std::runtime_error("Could not parse certificate subject"); + } + *subject = std::string(subj_buf, static_cast(subj_len)); + + StructGuard issuer_bio(BIO_new(BIO_s_mem()), BIO_vfree); + X509_NAME_print_ex(issuer_bio.get(), 
X509_get_issuer_name(x.get()), 1, 0); + char *issuer_buf = nullptr; + auto issuer_len = BIO_get_mem_data(issuer_bio.get(), &issuer_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) + if (issuer_buf == nullptr) { + throw std::runtime_error("Could not parse certificate issuer"); } - boost::scoped_array buf(new char[len + 1]); - X509_NAME_get_text_by_NID(X509_get_subject_name(x.get()), NID_commonName, buf.get(), len + 1); - std::string cn(buf.get()); - return cn; + *issuer = std::string(issuer_buf, static_cast(issuer_len)); + +#if AKTUALIZR_OPENSSL_PRE_11 + const ASN1_TIME *nb_asn1 = X509_get_notBefore(x.get()); +#else + const ASN1_TIME *nb_asn1 = X509_get0_notBefore(x.get()); +#endif + StructGuard nb_bio(BIO_new(BIO_s_mem()), BIO_vfree); + ASN1_TIME_print(nb_bio.get(), nb_asn1); + char *nb_buf; + auto nb_len = BIO_get_mem_data(nb_bio.get(), &nb_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) + *not_before = std::string(nb_buf, static_cast(nb_len)); + +#if AKTUALIZR_OPENSSL_PRE_11 + const ASN1_TIME *na_asn1 = X509_get_notAfter(x.get()); +#else + const ASN1_TIME *na_asn1 = X509_get0_notAfter(x.get()); +#endif + StructGuard na_bio(BIO_new(BIO_s_mem()), BIO_vfree); + ASN1_TIME_print(na_bio.get(), na_asn1); + char *na_buf; + auto na_len = BIO_get_mem_data(na_bio.get(), &na_buf); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast) + *not_after = std::string(na_buf, static_cast(na_len)); } -void KeyManager::copyCertsToCurl(HttpInterface &http) { +void KeyManager::copyCertsToCurl(HttpInterface &http) const { std::string pkey = getPkey(); std::string cert = getCert(); std::string ca = getCa(); - if ((pkey.size() != 0u) && (cert.size() != 0u) && (ca.size() != 0u)) { + if (!pkey.empty() && !cert.empty() && !ca.empty()) { http.setCerts(ca, config_.tls_ca_source, cert, config_.tls_cert_source, pkey, config_.tls_pkey_source); } } @@ -216,22 +293,25 @@ Json::Value KeyManager::signTuf(const Json::Value &in_data) const { } std::string b64sig; - if 
(config_.uptane_key_source == CryptoSource::kAndroid) { -#if defined(ANDROID) - b64sig = AndroidKeyStore::instance().signData(Json::FastWriter().write(in_data)); -#else - throw std::runtime_error("Aktualizr was built without Android support"); -#endif - } else { - if (config_.uptane_key_source == CryptoSource::kFile) { - backend_->loadPrimaryPrivate(&private_key); - } - b64sig = Utils::toBase64( - Crypto::Sign(config_.uptane_key_type, crypto_engine, private_key, Json::FastWriter().write(in_data))); + if (config_.uptane_key_source == CryptoSource::kFile) { + backend_->loadPrimaryPrivate(&private_key); } + b64sig = Utils::toBase64( + Crypto::Sign(config_.uptane_key_type, crypto_engine, private_key, Utils::jsonToCanonicalStr(in_data))); Json::Value signature; - signature["method"] = "rsassa-pss"; + switch (config_.uptane_key_type) { + case KeyType::kRSA2048: + case KeyType::kRSA3072: + case KeyType::kRSA4096: + signature["method"] = "rsassa-pss"; + break; + case KeyType::kED25519: + signature["method"] = "ed25519"; + break; + default: + throw std::runtime_error("Unknown key type"); + } signature["sig"] = b64sig; Json::Value out_data; @@ -254,31 +334,19 @@ std::string KeyManager::generateUptaneKeyPair() { } } if (primary_public.empty() && primary_private.empty()) { - throw std::runtime_error("Could not get uptane keys"); - } - } else if (config_.uptane_key_source == CryptoSource::kAndroid) { -#if defined(ANDROID) - primary_public = AndroidKeyStore::instance().getPublicKey(); - if (primary_public.empty()) { - primary_public = AndroidKeyStore::instance().generateKeyPair(); - } -#else - throw std::runtime_error("Aktualizr was built without Android support"); -#endif - if (primary_public.empty()) { - throw std::runtime_error("Could not get uptane keys"); + throw std::runtime_error("Could not get Uptane keys"); } } else { if (!built_with_p11) { - throw std::runtime_error("Aktualizr was built without pkcs11 support!"); + throw std::runtime_error("Aktualizr was built without 
PKCS#11 support!"); } // dummy read to check if the key is present - if (!(*p11_)->readUptanePublicKey(&primary_public)) { - (*p11_)->generateUptaneKeyPair(); + if (!(*p11_)->readUptanePublicKey(config_.p11.uptane_key_id, &primary_public)) { + (*p11_)->generateUptaneKeyPair(config_.p11.uptane_key_id); } // really read the key - if (primary_public.empty() && !(*p11_)->readUptanePublicKey(&primary_public)) { - throw std::runtime_error("Could not get uptane keys"); + if (primary_public.empty() && !(*p11_)->readUptanePublicKey(config_.p11.uptane_key_id, &primary_public)) { + throw std::runtime_error("Could not get Uptane keys"); } } return primary_public; @@ -288,24 +356,15 @@ PublicKey KeyManager::UptanePublicKey() const { std::string primary_public; if (config_.uptane_key_source == CryptoSource::kFile) { if (!backend_->loadPrimaryPublic(&primary_public)) { - throw std::runtime_error("Could not get uptane public key!"); - } - } else if (config_.uptane_key_source == CryptoSource::kAndroid) { -#if defined(ANDROID) - primary_public = AndroidKeyStore::instance().getPublicKey(); -#else - throw std::runtime_error("Aktualizr was built without Android support"); -#endif - if (primary_public.empty()) { - throw std::runtime_error("Could not get uptane public key!"); + throw std::runtime_error("Could not get Uptane public key!"); } } else { if (!built_with_p11) { - throw std::runtime_error("Aktualizr was built without pkcs11 support!"); + throw std::runtime_error("Aktualizr was built without PKCS#11 support!"); } // dummy read to check if the key is present - if (!(*p11_)->readUptanePublicKey(&primary_public)) { - throw std::runtime_error("Could not get uptane public key!"); + if (!(*p11_)->readUptanePublicKey(config_.p11.uptane_key_id, &primary_public)) { + throw std::runtime_error("Could not get Uptane public key!"); } } return PublicKey(primary_public, config_.uptane_key_type); diff --git a/src/libaktualizr/crypto/keymanager.h b/src/libaktualizr/crypto/keymanager.h index 
d7c43f884d..148dae6988 100644 --- a/src/libaktualizr/crypto/keymanager.h +++ b/src/libaktualizr/crypto/keymanager.h @@ -1,19 +1,26 @@ #ifndef KEYMANAGER_H_ #define KEYMANAGER_H_ -#include "keymanager_config.h" +#include +#include -#include "http/httpinterface.h" -#include "p11engine.h" -#include "storage/invstorage.h" -#include "utilities/utils.h" +#include "json/json.h" + +#include "libaktualizr/config.h" // for KeyManagerConfig +#include "libaktualizr/types.h" // for KeyType, PublicKey +#include "utilities/utils.h" // for TemporaryFile + +class HttpInterface; +class INvStorage; +class P11EngineGuard; class KeyManager { public: // std::string RSAPSSSign(const std::string &message); // Contains the logic from HttpClient::setCerts() - void copyCertsToCurl(HttpInterface &http); - KeyManager(std::shared_ptr backend, KeyManagerConfig config); + void copyCertsToCurl(HttpInterface &http) const; + KeyManager(std::shared_ptr backend, KeyManagerConfig config, + const std::shared_ptr &p11 = nullptr); void loadKeys(const std::string *pkey_content = nullptr, const std::string *cert_content = nullptr, const std::string *ca_content = nullptr); std::string getPkeyFile() const; @@ -23,7 +30,9 @@ class KeyManager { std::string getCert() const; std::string getCa() const; std::string getCN() const; - bool isOk() const { return ((getPkey().size() != 0u) && (getCert().size() != 0u) && (getCa().size() != 0u)); } + std::string getBC() const; + void getCertInfo(std::string *subject, std::string *issuer, std::string *not_before, std::string *not_after) const; + bool isOk() const { return (!getPkey().empty() && !getCert().empty() && !getCa().empty()); } std::string generateUptaneKeyPair(); KeyType getUptaneKeyType() const { return config_.uptane_key_type; } Json::Value signTuf(const Json::Value &in_data) const; @@ -33,7 +42,7 @@ class KeyManager { private: std::shared_ptr backend_; const KeyManagerConfig config_; - std::unique_ptr p11_; + std::shared_ptr p11_; std::unique_ptr tmp_pkey_file; 
std::unique_ptr tmp_cert_file; std::unique_ptr tmp_ca_file; diff --git a/src/libaktualizr/crypto/keymanager_config.h b/src/libaktualizr/crypto/keymanager_config.h deleted file mode 100644 index 3fe76f9af4..0000000000 --- a/src/libaktualizr/crypto/keymanager_config.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef CRYPTO_KEYMANAGER_CONFIG_H_ -#define CRYPTO_KEYMANAGER_CONFIG_H_ - -#include "crypto/p11_config.h" -#include "utilities/types.h" - -// bundle some parts of the main config together -// Should be derived by calling Config::keymanagerConfig() -struct KeyManagerConfig { - KeyManagerConfig() = delete; // only allow construction by initializer list - P11Config p11; - CryptoSource tls_ca_source; - CryptoSource tls_pkey_source; - CryptoSource tls_cert_source; - KeyType uptane_key_type; - CryptoSource uptane_key_source; -}; - -#endif // CRYPTO_KEYMANAGER_CONFIG_H_ diff --git a/src/libaktualizr/crypto/keymanager_test.cc b/src/libaktualizr/crypto/keymanager_test.cc index cbdc512c27..582bb48611 100644 --- a/src/libaktualizr/crypto/keymanager_test.cc +++ b/src/libaktualizr/crypto/keymanager_test.cc @@ -1,10 +1,13 @@ #include + #include +#include #include "json/json.h" -#include "config/config.h" #include "crypto/keymanager.h" +#include "crypto/p11engine.h" +#include "libaktualizr/config.h" #include "storage/sqlstorage.h" #include "utilities/utils.h" @@ -33,6 +36,7 @@ TEST(KeyManager, SignTuf) { EXPECT_EQ(signed_json["signatures"][0]["keyid"].asString(), "6a809c62b4f6c2ae11abfb260a6a9a57d205fc2887ab9c83bd6be0790293e187"); EXPECT_NE(signed_json["signatures"][0]["sig"].asString().size(), 0); + EXPECT_EQ(signed_json["signatures"][0]["method"].asString(), "rsassa-pss"); } /* Sign TUF metadata with ED25519. 
*/ @@ -58,6 +62,7 @@ TEST(KeyManager, SignED25519Tuf) { EXPECT_EQ(signed_json["signatures"][0]["keyid"].asString(), "a6d0f6b52ae833175dd7724899507709231723037845715c7677670e0195f850"); EXPECT_NE(signed_json["signatures"][0]["sig"].asString().size(), 0); + EXPECT_EQ(signed_json["signatures"][0]["method"].asString(), "ed25519"); } TEST(KeyManager, InitFileEmpty) { @@ -109,15 +114,35 @@ TEST(KeyManager, InitFileValid) { } #ifdef BUILD_P11 + +class P11KeyManager : public ::testing::Test { + protected: + static void SetUpTestSuite() { p11_ = std::make_shared(module_path_, pass_, label_); } + + static void TearDownTestSuite() { p11_.reset(); } + + static boost::filesystem::path module_path_; + static std::string pass_; + static std::string label_; + static std::shared_ptr p11_; +}; + +boost::filesystem::path P11KeyManager::module_path_{TEST_PKCS11_MODULE_PATH}; +std::string P11KeyManager::pass_{"1234"}; +std::string P11KeyManager::label_{"Virtual token"}; +std::shared_ptr P11KeyManager::p11_{nullptr}; + /* Sign and verify a file with RSA via PKCS#11. 
*/ -TEST(KeyManager, SignTufPkcs11) { +TEST_F(P11KeyManager, SignTufPkcs11) { + P11Config p11_conf; + p11_conf.module = module_path_; + p11_conf.pass = pass_; + p11_conf.label = label_; + p11_conf.uptane_key_id = "03"; + Json::Value tosign_json; tosign_json["mykey"] = "value"; - P11Config p11_conf; - p11_conf.module = TEST_PKCS11_MODULE_PATH; - p11_conf.pass = "1234"; - p11_conf.uptane_key_id = "03"; Config config; config.p11 = p11_conf; config.uptane.key_source = CryptoSource::kPkcs11; @@ -125,7 +150,7 @@ TEST(KeyManager, SignTufPkcs11) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - KeyManager keys(storage, config.keymanagerConfig()); + KeyManager keys(storage, config.keymanagerConfig(), p11_); EXPECT_GT(keys.UptanePublicKey().Value().size(), 0); Json::Value signed_json = keys.signTuf(tosign_json); @@ -136,13 +161,14 @@ TEST(KeyManager, SignTufPkcs11) { } /* Generate Uptane keys, use them for signing, and verify them. 
*/ -TEST(KeyManager, GenSignTufPkcs11) { +TEST_F(P11KeyManager, GenSignTufPkcs11) { Json::Value tosign_json; tosign_json["mykey"] = "value"; P11Config p11_conf; - p11_conf.module = TEST_PKCS11_MODULE_PATH; - p11_conf.pass = "1234"; + p11_conf.module = module_path_; + p11_conf.pass = pass_; + p11_conf.label = label_; p11_conf.uptane_key_id = "06"; Config config; config.p11 = p11_conf; @@ -151,10 +177,10 @@ TEST(KeyManager, GenSignTufPkcs11) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - KeyManager keys(storage, config.keymanagerConfig()); + KeyManager keys(storage, config.keymanagerConfig(), p11_); - P11EngineGuard p11(config.p11); - EXPECT_TRUE(p11->generateUptaneKeyPair()); + P11EngineGuard p11(config.p11.module, config.p11.pass, config.p11.label); + EXPECT_TRUE(p11->generateUptaneKeyPair(p11_conf.uptane_key_id)); EXPECT_GT(keys.UptanePublicKey().Value().size(), 0); Json::Value signed_json = keys.signTuf(tosign_json); @@ -162,12 +188,13 @@ TEST(KeyManager, GenSignTufPkcs11) { EXPECT_NE(signed_json["signatures"][0]["sig"].asString().size(), 0); } -/* Generate RSA keypairs via PKCS#11. */ -TEST(KeyManager, InitPkcs11Valid) { +///* Generate RSA keypairs via PKCS#11. */ +TEST_F(P11KeyManager, InitPkcs11Valid) { Config config; P11Config p11_conf; - p11_conf.module = TEST_PKCS11_MODULE_PATH; - p11_conf.pass = "1234"; + p11_conf.module = module_path_; + p11_conf.pass = pass_; + p11_conf.label = label_; p11_conf.tls_pkey_id = "02"; p11_conf.tls_clientcert_id = "01"; config.p11 = p11_conf; @@ -181,7 +208,7 @@ TEST(KeyManager, InitPkcs11Valid) { // Getting the CA from the HSM is not currently supported. 
std::string ca = Utils::readFile("tests/test_data/prov/root.crt"); storage->storeTlsCa(ca); - KeyManager keys(storage, config.keymanagerConfig()); + KeyManager keys(storage, config.keymanagerConfig(), p11_); EXPECT_TRUE(keys.getCaFile().empty()); EXPECT_FALSE(keys.getPkeyFile().empty()); EXPECT_FALSE(keys.getCertFile().empty()); diff --git a/src/libaktualizr/crypto/p11_config.h b/src/libaktualizr/crypto/p11_config.h deleted file mode 100644 index 426f94c583..0000000000 --- a/src/libaktualizr/crypto/p11_config.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef CRYPTO_P11_CONFIG_H_ -#define CRYPTO_P11_CONFIG_H_ - -#include - -#include -#include - -#include "utilities/config_utils.h" - -// declare p11 types as incomplete so that the header can be used without libp11 -struct PKCS11_ctx_st; -struct PKCS11_slot_st; - -struct P11Config { - boost::filesystem::path module; - std::string pass; - std::string uptane_key_id; - std::string tls_cacert_id; - std::string tls_pkey_id; - std::string tls_clientcert_id; - - void updateFromPropertyTree(const boost::property_tree::ptree &pt) { - CopyFromConfig(module, "module", pt); - CopyFromConfig(pass, "pass", pt); - CopyFromConfig(uptane_key_id, "uptane_key_id", pt); - CopyFromConfig(tls_cacert_id, "tls_cacert_id", pt); - CopyFromConfig(tls_pkey_id, "tls_pkey_id", pt); - CopyFromConfig(tls_clientcert_id, "tls_clientcert_id", pt); - } - - void writeToStream(std::ostream &out_stream) const { - writeOption(out_stream, module, "module"); - writeOption(out_stream, pass, "pass"); - writeOption(out_stream, uptane_key_id, "uptane_key_id"); - writeOption(out_stream, tls_cacert_id, "tls_ca_id"); - writeOption(out_stream, tls_pkey_id, "tls_pkey_id"); - writeOption(out_stream, tls_clientcert_id, "tls_clientcert_id"); - } -}; - -#endif // CRYPTO_P11_CONFIG_H_ diff --git a/src/libaktualizr/crypto/p11engine.cc b/src/libaktualizr/crypto/p11engine.cc index 7cc9d485c1..f108a0ac1f 100644 --- a/src/libaktualizr/crypto/p11engine.cc +++ 
b/src/libaktualizr/crypto/p11engine.cc @@ -1,5 +1,6 @@ #include "p11engine.h" +#include #include #include @@ -14,8 +15,8 @@ #include "utilities/config_utils.h" #include "utilities/utils.h" -P11Engine* P11EngineGuard::instance = nullptr; -int P11EngineGuard::ref_counter = 0; +P11Engine* P11EngineGuard::instance = nullptr; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +int P11EngineGuard::ref_counter = 0; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) P11ContextWrapper::P11ContextWrapper(const boost::filesystem::path& module) { if (module.empty()) { @@ -42,11 +43,11 @@ P11ContextWrapper::~P11ContextWrapper() { P11SlotsWrapper::P11SlotsWrapper(PKCS11_ctx_st* ctx_in) { ctx = ctx_in; if (ctx == nullptr) { - wslots_ = nullptr; + slots_ = nullptr; nslots = 0; return; } - if (PKCS11_enumerate_slots(ctx, &wslots_, &nslots) != 0) { + if (PKCS11_enumerate_slots(ctx, &slots_, &nslots) != 0) { LOG_ERROR << "Couldn't enumerate slots" << ": " << ERR_error_string(ERR_get_error(), nullptr); throw std::runtime_error("PKCS11 error"); @@ -54,17 +55,22 @@ P11SlotsWrapper::P11SlotsWrapper(PKCS11_ctx_st* ctx_in) { } P11SlotsWrapper::~P11SlotsWrapper() { - if ((wslots_ != nullptr) && (nslots != 0u)) { - PKCS11_release_all_slots(ctx, wslots_, nslots); + if ((slots_ != nullptr) && (nslots != 0U)) { + PKCS11_release_all_slots(ctx, slots_, nslots); } } -P11Engine::P11Engine(P11Config config) : config_(std::move(config)), ctx_(config_.module), slots_(ctx_.get()) { - if (config_.module.empty()) { +P11Engine::P11Engine(boost::filesystem::path module_path, std::string pass, std::string label) + : module_path_(std::move(module_path)), + pass_{std::move(pass)}, + label_{std::move(label)}, + ctx_(module_path_), + wslots_(ctx_.get()) { + if (module_path_.empty()) { return; } - PKCS11_SLOT* slot = PKCS11_find_token(ctx_.get(), slots_.get_slots(), slots_.get_nslots()); + PKCS11_SLOT* slot = findTokenSlot(); if ((slot == nullptr) || (slot->token == nullptr)) { throw 
std::runtime_error("Couldn't find pkcs11 token"); } @@ -76,7 +82,7 @@ P11Engine::P11Engine(P11Config config) : config_(std::move(config)), ctx_(config LOG_DEBUG << "Slot token model.......: " << slot->token->model; LOG_DEBUG << "Slot token serialnr....: " << slot->token->serialnr; - uri_prefix_ = std::string("pkcs11:serial=") + slot->token->serialnr + ";pin-value=" + config_.pass + ";id=%"; + uri_prefix_ = std::string("pkcs11:serial=") + slot->token->serialnr + ";pin-value=" + pass_ + ";id=%"; ENGINE_load_builtin_engines(); ENGINE* engine = ENGINE_by_id("dynamic"); @@ -89,31 +95,31 @@ P11Engine::P11Engine(P11Config config) : config_(std::move(config)), ctx_(config const boost::filesystem::path pkcs11Path = findPkcsLibrary(); LOG_INFO << "Loading PKCS#11 engine library: " << pkcs11Path.string(); if (ENGINE_ctrl_cmd_string(engine, "SO_PATH", pkcs11Path.c_str(), 0) == 0) { - throw std::runtime_error(std::string("Engine command failed: SO_PATH ") + pkcs11Path.string()); + throw std::runtime_error(std::string("P11 engine command failed: SO_PATH ") + pkcs11Path.string()); } if (ENGINE_ctrl_cmd_string(engine, "ID", "pkcs11", 0) == 0) { - throw std::runtime_error("Engine command failed: ID pksc11"); + throw std::runtime_error("P11 engine command failed: ID pksc11"); } if (ENGINE_ctrl_cmd_string(engine, "LIST_ADD", "1", 0) == 0) { - throw std::runtime_error("Engine command failed: LIST_ADD 1"); + throw std::runtime_error("P11 engine command failed: LIST_ADD 1"); } if (ENGINE_ctrl_cmd_string(engine, "LOAD", nullptr, 0) == 0) { - throw std::runtime_error("Engine command failed: LOAD"); + throw std::runtime_error("P11 engine command failed: LOAD"); } - if (ENGINE_ctrl_cmd_string(engine, "MODULE_PATH", config_.module.c_str(), 0) == 0) { - throw std::runtime_error(std::string("Engine command failed: MODULE_PATH ") + config_.module.string()); + if (ENGINE_ctrl_cmd_string(engine, "MODULE_PATH", module_path_.c_str(), 0) == 0) { + throw std::runtime_error(std::string("P11 engine 
command failed: MODULE_PATH ") + module_path_.string()); } - if (ENGINE_ctrl_cmd_string(engine, "PIN", config_.pass.c_str(), 0) == 0) { - throw std::runtime_error(std::string("Engine command failed: PIN")); + if (ENGINE_ctrl_cmd_string(engine, "PIN", pass_.c_str(), 0) == 0) { + throw std::runtime_error(std::string("P11 engine command failed: PIN")); } if (ENGINE_init(engine) == 0) { - throw std::runtime_error("Engine initialization failed"); + throw std::runtime_error("P11 engine initialization failed"); } } catch (const std::runtime_error& exc) { // Note: treat these in a special case, as ENGINE_finish cannot be called on @@ -126,27 +132,43 @@ P11Engine::P11Engine(P11Config config) : config_(std::move(config)), ctx_(config ssl_engine_ = engine; } -boost::filesystem::path P11Engine::findPkcsLibrary() { -#ifdef TEST_PKCS11_ENGINE_PATH - const boost::filesystem::path custom_path = TEST_PKCS11_ENGINE_PATH; -#else - const boost::filesystem::path custom_path; +// Hack for clang-tidy +#ifndef PKCS11_ENGINE_PATH +#define PKCS11_ENGINE_PATH "dummy" #endif - const boost::filesystem::path openssl11_path = "/usr/lib/engines-1.1/pkcs11.so"; - const boost::filesystem::path default_path = "/usr/lib/engines/pkcs11.so"; - if (boost::filesystem::exists(custom_path)) { - return custom_path; - } else if (boost::filesystem::exists(openssl11_path)) { - return openssl11_path; - } else { - return default_path; + +boost::filesystem::path P11Engine::findPkcsLibrary() { + static const boost::filesystem::path engine_path = PKCS11_ENGINE_PATH; + + if (!boost::filesystem::exists(engine_path)) { + LOG_ERROR << "PKCS11 engine not available (" << engine_path << ")"; + return ""; } + + return engine_path; } PKCS11_SLOT* P11Engine::findTokenSlot() const { - PKCS11_SLOT* slot = PKCS11_find_token(ctx_.get(), slots_.get_slots(), slots_.get_nslots()); + PKCS11_SLOT* slot{nullptr}; + PKCS11_TOKEN* tok; + const auto nslot{wslots_.get_nslots()}; + + if (label_.empty()) { + LOG_WARNING << "Token label 
missing. Using 1st initialized token."; + slot = PKCS11_find_token(ctx_.get(), wslots_.get_slots(), wslots_.get_nslots()); + } else { + auto iterslot{wslots_.get_slots()}; + for (unsigned int i = 0; i < nslot; i++, iterslot++) { + if (iterslot != nullptr && (tok = iterslot->token) != nullptr) { + if (label_ == tok->label) { + slot = iterslot; + break; + } + } + } + } if ((slot == nullptr) || (slot->token == nullptr)) { - LOG_ERROR << "Couldn't find a token"; + LOG_ERROR << "Couldn't find a token with label " << label_; return nullptr; } int rv; @@ -156,7 +178,7 @@ PKCS11_SLOT* P11Engine::findTokenSlot() const { LOG_ERROR << "Error creating rw session in to the slot: " << ERR_error_string(ERR_get_error(), nullptr); } - if (PKCS11_login(slot, 0, config_.pass.c_str()) != 0) { + if (PKCS11_login(slot, 0, pass_.c_str()) != 0) { LOG_ERROR << "Error logging in to the token: " << ERR_error_string(ERR_get_error(), nullptr); return nullptr; } @@ -164,11 +186,11 @@ PKCS11_SLOT* P11Engine::findTokenSlot() const { return slot; } -bool P11Engine::readUptanePublicKey(std::string* key_out) { - if (config_.module.empty()) { +bool P11Engine::readUptanePublicKey(const std::string& uptane_key_id, std::string* key_out) { + if (module_path_.empty()) { return false; } - if ((config_.uptane_key_id.length() % 2) != 0u) { + if ((uptane_key_id.length() % 2) != 0U) { return false; // id is a hex string } @@ -187,13 +209,13 @@ bool P11Engine::readUptanePublicKey(std::string* key_out) { PKCS11_KEY* key = nullptr; { std::vector id_hex; - boost::algorithm::unhex(config_.uptane_key_id, std::back_inserter(id_hex)); + boost::algorithm::unhex(uptane_key_id, std::back_inserter(id_hex)); for (unsigned int i = 0; i < nkeys; i++) { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) - if ((keys[i].id_len == config_.uptane_key_id.length() / 2) && + if ((keys[i].id_len == uptane_key_id.length() / 2) && // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) - (memcmp(keys[i].id, 
id_hex.data(), config_.uptane_key_id.length() / 2) == 0)) { + (memcmp(keys[i].id, id_hex.data(), uptane_key_id.length() / 2) == 0)) { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) key = &keys[i]; break; @@ -209,20 +231,21 @@ bool P11Engine::readUptanePublicKey(std::string* key_out) { PEM_write_bio_PUBKEY(mem.get(), evp_key.get()); char* pem_key = nullptr; - long length = BIO_get_mem_data(mem.get(), &pem_key); // NOLINT(google-runtime-int) + // NOLINTNEXTLINE(google-runtime-int,cppcoreguidelines-pro-type-cstyle-cast) + long length = BIO_get_mem_data(mem.get(), &pem_key); key_out->assign(pem_key, static_cast(length)); return true; } -bool P11Engine::generateUptaneKeyPair() { +bool P11Engine::generateUptaneKeyPair(const std::string& uptane_key_id) { PKCS11_SLOT* slot = findTokenSlot(); if (slot == nullptr) { return false; } std::vector id_hex; - boost::algorithm::unhex(config_.uptane_key_id, std::back_inserter(id_hex)); + boost::algorithm::unhex(uptane_key_id, std::back_inserter(id_hex)); // Manually generate a key and store it on the HSM // Note that libp11 has a dedicated function marked as deprecated, it @@ -236,22 +259,22 @@ bool P11Engine::generateUptaneKeyPair() { } if (PKCS11_store_private_key(slot->token, pkey.get(), nullptr, id_hex.data(), id_hex.size()) != 0) { + LOG_ERROR << "Could not store private key on the token"; return false; } if (PKCS11_store_public_key(slot->token, pkey.get(), nullptr, id_hex.data(), id_hex.size()) != 0) { + LOG_ERROR << "Could not store public key on the token"; return false; } return true; } -bool P11Engine::readTlsCert(std::string* cert_out) const { - const std::string& id = config_.tls_clientcert_id; - - if (config_.module.empty()) { +bool P11Engine::readTlsCert(const std::string& id, std::string* cert_out) const { + if (module_path_.empty()) { return false; } - if ((id.length() % 2) != 0u) { + if ((id.length() % 2) != 0U) { return false; // id is a hex string } @@ -290,7 +313,8 @@ bool 
P11Engine::readTlsCert(std::string* cert_out) const { PEM_write_bio_X509(mem.get(), cert->x509); char* pem_key = nullptr; - long length = BIO_get_mem_data(mem.get(), &pem_key); // NOLINT(google-runtime-int) + // NOLINTNEXTLINE(google-runtime-int,cppcoreguidelines-pro-type-cstyle-cast) + long length = BIO_get_mem_data(mem.get(), &pem_key); cert_out->assign(pem_key, static_cast(length)); return true; diff --git a/src/libaktualizr/crypto/p11engine.h b/src/libaktualizr/crypto/p11engine.h index 3856d6998e..fc3ad7b71f 100644 --- a/src/libaktualizr/crypto/p11engine.h +++ b/src/libaktualizr/crypto/p11engine.h @@ -3,19 +3,22 @@ #include +#include "libaktualizr/config.h" + #include #include #include "gtest/gtest_prod.h" #include "logging/logging.h" -#include "p11_config.h" class P11ContextWrapper { public: explicit P11ContextWrapper(const boost::filesystem::path &module); - ~P11ContextWrapper(); + ~P11ContextWrapper(); // NOLINT(performance-trivially-destructible) P11ContextWrapper(const P11ContextWrapper &) = delete; + P11ContextWrapper(P11ContextWrapper &&) = delete; P11ContextWrapper &operator=(const P11ContextWrapper &) = delete; + P11ContextWrapper &operator=(P11ContextWrapper &&) = delete; PKCS11_ctx_st *get() const { return ctx; } private: @@ -25,15 +28,17 @@ class P11ContextWrapper { class P11SlotsWrapper { public: explicit P11SlotsWrapper(PKCS11_ctx_st *ctx_in); - ~P11SlotsWrapper(); + ~P11SlotsWrapper(); // NOLINT(performance-trivially-destructible) P11SlotsWrapper(const P11SlotsWrapper &) = delete; + P11SlotsWrapper(P11SlotsWrapper &&) = delete; P11SlotsWrapper &operator=(const P11SlotsWrapper &) = delete; - PKCS11_slot_st *get_slots() const { return wslots_; } + P11SlotsWrapper &operator=(P11SlotsWrapper &&) = delete; + PKCS11_slot_st *get_slots() const { return slots_; } unsigned int get_nslots() const { return nslots; } private: - PKCS11_ctx_st *ctx; // NOLINT - PKCS11_slot_st *wslots_; + PKCS11_ctx_st *ctx; + PKCS11_slot_st *slots_; unsigned int nslots; }; @@ 
-42,7 +47,9 @@ class P11EngineGuard; class P11Engine { public: P11Engine(const P11Engine &) = delete; + P11Engine(P11Engine &&) = delete; P11Engine &operator=(const P11Engine &) = delete; + P11Engine &operator=(P11Engine &&) = delete; virtual ~P11Engine() { if (ssl_engine_ != nullptr) { @@ -53,25 +60,24 @@ class P11Engine { } ENGINE *getEngine() { return ssl_engine_; } - std::string getUptaneKeyId() const { return uri_prefix_ + config_.uptane_key_id; } - std::string getTlsCacertId() const { return uri_prefix_ + config_.tls_cacert_id; } - std::string getTlsPkeyId() const { return uri_prefix_ + config_.tls_pkey_id; } - std::string getTlsCertId() const { return uri_prefix_ + config_.tls_clientcert_id; } - bool readUptanePublicKey(std::string *key_out); - bool readTlsCert(std::string *cert_out) const; - bool generateUptaneKeyPair(); + std::string getItemFullId(const std::string &id) const { return uri_prefix_ + id; } + bool readUptanePublicKey(const std::string &uptane_key_id, std::string *key_out); + bool readTlsCert(const std::string &id, std::string *cert_out) const; + bool generateUptaneKeyPair(const std::string &uptane_key_id); private: - const P11Config config_; + const boost::filesystem::path module_path_; + const std::string pass_; + const std::string label_; ENGINE *ssl_engine_{nullptr}; std::string uri_prefix_; P11ContextWrapper ctx_; - P11SlotsWrapper slots_; + P11SlotsWrapper wslots_; static boost::filesystem::path findPkcsLibrary(); PKCS11_slot_st *findTokenSlot() const; - explicit P11Engine(P11Config config); + explicit P11Engine(boost::filesystem::path module_path, std::string pass, std::string label); friend class P11EngineGuard; FRIEND_TEST(crypto, findPkcsLibrary); @@ -79,12 +85,13 @@ class P11Engine { class P11EngineGuard { public: - explicit P11EngineGuard(const P11Config &config) { + explicit P11EngineGuard(boost::filesystem::path module_path, std::string pass, std::string label) { if (instance == nullptr) { - instance = new P11Engine(config); + 
instance = new P11Engine(std::move(module_path), std::move(pass), std::move(label)); } ++ref_counter; - }; + } + ~P11EngineGuard() { if (ref_counter != 0) { --ref_counter; @@ -94,11 +101,17 @@ class P11EngineGuard { instance = nullptr; } } + + P11EngineGuard(const P11EngineGuard &) = delete; + P11EngineGuard(P11EngineGuard &&) = delete; + P11EngineGuard &operator=(const P11EngineGuard &) = delete; + P11EngineGuard &operator=(P11EngineGuard &&) = delete; + P11Engine *operator->() const { return instance; } private: - static P11Engine *instance; - static int ref_counter; + static P11Engine *instance; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + static int ref_counter; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) }; #endif diff --git a/src/libaktualizr/crypto/p11engine_dummy.cc b/src/libaktualizr/crypto/p11engine_dummy.cc index 39a3fe3659..0260afa0cd 100644 --- a/src/libaktualizr/crypto/p11engine_dummy.cc +++ b/src/libaktualizr/crypto/p11engine_dummy.cc @@ -1,7 +1,7 @@ #include "p11engine.h" -P11Engine* P11EngineGuard::instance = nullptr; -int P11EngineGuard::ref_counter = 0; +P11Engine* P11EngineGuard::instance = nullptr; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +int P11EngineGuard::ref_counter = 0; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) P11ContextWrapper::P11ContextWrapper(const boost::filesystem::path& module) : ctx(nullptr) { (void)module; @@ -10,7 +10,7 @@ P11ContextWrapper::P11ContextWrapper(const boost::filesystem::path& module) : ct P11ContextWrapper::~P11ContextWrapper() = default; -P11SlotsWrapper::P11SlotsWrapper(PKCS11_ctx_st* ctx_in) : ctx(nullptr), wslots_(nullptr), nslots(0) { +P11SlotsWrapper::P11SlotsWrapper(PKCS11_ctx_st* ctx_in) : ctx(nullptr), slots_(nullptr), nslots(0) { (void)ctx_in; throw std::runtime_error("Aktualizr was built without PKCS#11"); } diff --git a/src/libaktualizr/http/httpclient.cc b/src/libaktualizr/http/httpclient.cc index 916eb9de66..c414705aad 
100644 --- a/src/libaktualizr/http/httpclient.cc +++ b/src/libaktualizr/http/httpclient.cc @@ -1,9 +1,11 @@ #include "httpclient.h" -#include +#include #include -#include "utilities/aktualizr_version.h" +#include +#include + #include "utilities/utils.h" struct WriteStringArg { @@ -35,7 +37,46 @@ static size_t writeString(void* contents, size_t size, size_t nmemb, void* userp return size * nmemb; } -HttpClient::HttpClient(const std::vector* extra_headers) { +struct ResponseHeaders { + explicit ResponseHeaders(const std::set& header_names_in) : header_names{header_names_in} {} + const std::set& header_names; + std::unordered_map headers; +}; + +static size_t header_callback(char* buffer, size_t size, size_t nitems, void* userdata) { + auto* const resp_headers{reinterpret_cast(userdata)}; + std::string header{buffer}; + + if (resp_headers == nullptr) { + LOG_WARNING << "Failed to cast the header callback context to `ResponseHeaders*`"; + return nitems * size; + } + + const auto split_pos{header.find(':')}; + if (std::string::npos != split_pos) { + std::string header_name{header.substr(0, split_pos)}; + std::string header_value{header.substr(split_pos + 1)}; + boost::trim_if(header_name, boost::is_any_of(" \t\r\n")); + boost::trim_if(header_value, boost::is_any_of(" \t\r\n")); + + boost::algorithm::to_lower(header_name); + boost::algorithm::to_lower(header_value); + + if (resp_headers->header_names.end() != resp_headers->header_names.find(header_name)) { + resp_headers->headers[header_name] = header_value; + } + } + return nitems * size; +} + +HttpClient::HttpClient(const std::vector* extra_headers, + const std::set* response_header_names) { + if (response_header_names != nullptr) { + for (const auto& name : *response_header_names) { + response_header_names_.emplace(boost::to_lower_copy(name)); + } + } + curl = curl_easy_init(); if (curl == nullptr) { throw std::runtime_error("Could not initialize curl"); @@ -47,13 +88,16 @@ HttpClient::HttpClient(const std::vector* 
extra_headers) { curlEasySetoptWrapper(curl, CURLOPT_CONNECTTIMEOUT, 60L); curlEasySetoptWrapper(curl, CURLOPT_CAPATH, Utils::getCaPath()); + curlEasySetoptWrapper(curl, CURLOPT_FOLLOWLOCATION, 1L); + curlEasySetoptWrapper(curl, CURLOPT_MAXREDIRS, 10L); + curlEasySetoptWrapper(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_301); + // let curl use our write function curlEasySetoptWrapper(curl, CURLOPT_WRITEFUNCTION, writeString); curlEasySetoptWrapper(curl, CURLOPT_WRITEDATA, NULL); curlEasySetoptWrapper(curl, CURLOPT_VERBOSE, get_curlopt_verbose()); - headers = curl_slist_append(headers, "Content-Type: application/json"); headers = curl_slist_append(headers, "Accept: */*"); if (extra_headers != nullptr) { @@ -61,68 +105,26 @@ HttpClient::HttpClient(const std::vector* extra_headers) { headers = curl_slist_append(headers, header.c_str()); } } - curlEasySetoptWrapper(curl, CURLOPT_HTTPHEADER, headers); curlEasySetoptWrapper(curl, CURLOPT_USERAGENT, Utils::getUserAgent()); } -HttpClient::HttpClient(const HttpClient& curl_in) : pkcs11_key(curl_in.pkcs11_key), pkcs11_cert(curl_in.pkcs11_key) { - curl = curl_easy_duphandle(curl_in.curl); - - struct curl_slist* inlist = curl_in.headers; - headers = nullptr; - struct curl_slist* tmp; - - while (inlist != nullptr) { - tmp = curl_slist_append(headers, inlist->data); - - if (tmp == nullptr) { - curl_slist_free_all(headers); - throw std::runtime_error("curl_slist_append returned null"); - } +HttpClient::HttpClient(const std::string& socket) : HttpClient() { + curlEasySetoptWrapper(curl, CURLOPT_UNIX_SOCKET_PATH, socket.c_str()); +} - headers = tmp; - inlist = inlist->next; - } +HttpClient::HttpClient(const HttpClient& curl_in) + : HttpInterface(curl_in), pkcs11_key(curl_in.pkcs11_key), pkcs11_cert(curl_in.pkcs11_key) { + curl = curl_easy_duphandle(curl_in.curl); + headers = curl_slist_dup(curl_in.headers); } -CurlGlobalInitWrapper HttpClient::manageCurlGlobalInit_{}; +const CurlGlobalInitWrapper HttpClient::manageCurlGlobalInit_{}; 
HttpClient::~HttpClient() { curl_slist_free_all(headers); curl_easy_cleanup(curl); } -HttpResponse HttpClient::get(const std::string& url, int64_t maxsize) { - CURL* curl_get = Utils::curlDupHandleWrapper(curl, pkcs11_key); - - // TODO: it is a workaround for an unidentified bug in libcurl. Ideally the bug itself should be fixed. - if (pkcs11_key) { - curlEasySetoptWrapper(curl_get, CURLOPT_SSLENGINE, "pkcs11"); - curlEasySetoptWrapper(curl_get, CURLOPT_SSLKEYTYPE, "ENG"); - } - - if (pkcs11_cert) { - curlEasySetoptWrapper(curl_get, CURLOPT_SSLCERTTYPE, "ENG"); - } - - // Clear POSTFIELDS to remove any lingering references to strings that have - // probably since been deallocated. - curlEasySetoptWrapper(curl_get, CURLOPT_POSTFIELDS, ""); - curlEasySetoptWrapper(curl_get, CURLOPT_URL, url.c_str()); - curlEasySetoptWrapper(curl_get, CURLOPT_HTTPGET, 1L); - if (maxsize >= 0) { - // it will only take effect if the server declares the size in advance, - // writeString callback takes care of the other case - curlEasySetoptWrapper(curl_get, CURLOPT_MAXFILESIZE_LARGE, maxsize); - } - curlEasySetoptWrapper(curl_get, CURLOPT_LOW_SPEED_TIME, speed_limit_time_interval_); - curlEasySetoptWrapper(curl_get, CURLOPT_LOW_SPEED_LIMIT, speed_limit_bytes_per_sec_); - LOG_DEBUG << "GET " << url; - HttpResponse response = perform(curl_get, RETRY_TIMES, maxsize); - curl_easy_cleanup(curl_get); - return response; -} - void HttpClient::setCerts(const std::string& ca, CryptoSource ca_source, const std::string& cert, CryptoSource cert_source, const std::string& pkey, CryptoSource pkey_source) { curlEasySetoptWrapper(curl, CURLOPT_SSL_VERIFYPEER, 1); @@ -164,38 +166,89 @@ void HttpClient::setCerts(const std::string& ca, CryptoSource ca_source, const s pkcs11_key = (pkey_source == CryptoSource::kPkcs11); } -HttpResponse HttpClient::post(const std::string& url, const Json::Value& data) { - CURL* curl_post = Utils::curlDupHandleWrapper(curl, pkcs11_key); +HttpResponse HttpClient::get(const 
std::string& url, int64_t maxsize) { + CURL* curl_get = dupHandle(curl, pkcs11_key); + + curlEasySetoptWrapper(curl_get, CURLOPT_HTTPHEADER, headers); + + if (pkcs11_cert) { + curlEasySetoptWrapper(curl_get, CURLOPT_SSLCERTTYPE, "ENG"); + } + + // Clear POSTFIELDS to remove any lingering references to strings that have + // probably since been deallocated. + curlEasySetoptWrapper(curl_get, CURLOPT_POSTFIELDS, ""); + curlEasySetoptWrapper(curl_get, CURLOPT_URL, url.c_str()); + curlEasySetoptWrapper(curl_get, CURLOPT_HTTPGET, 1L); + LOG_DEBUG << "GET " << url; + HttpResponse response = perform(curl_get, RETRY_TIMES, maxsize); + curl_easy_cleanup(curl_get); + return response; +} + +HttpResponse HttpClient::post(const std::string& url, const std::string& content_type, const std::string& data) { + CURL* curl_post = dupHandle(curl, pkcs11_key); + curl_slist* req_headers = curl_slist_dup(headers); + req_headers = curl_slist_append(req_headers, (std::string("Content-Type: ") + content_type).c_str()); + curlEasySetoptWrapper(curl_post, CURLOPT_HTTPHEADER, req_headers); curlEasySetoptWrapper(curl_post, CURLOPT_URL, url.c_str()); curlEasySetoptWrapper(curl_post, CURLOPT_POST, 1); - std::string data_str = Json::FastWriter().write(data); - curlEasySetoptWrapper(curl_post, CURLOPT_POSTFIELDS, data_str.c_str()); - LOG_TRACE << "post request body:" << data; + curlEasySetoptWrapper(curl_post, CURLOPT_POSTFIELDS, data.c_str()); auto result = perform(curl_post, RETRY_TIMES, HttpInterface::kPostRespLimit); curl_easy_cleanup(curl_post); + curl_slist_free_all(req_headers); return result; } -HttpResponse HttpClient::put(const std::string& url, const Json::Value& data) { - CURL* curl_put = Utils::curlDupHandleWrapper(curl, pkcs11_key); +HttpResponse HttpClient::post(const std::string& url, const Json::Value& data) { + std::string data_str = Utils::jsonToCanonicalStr(data); + LOG_TRACE << "post request body:" << data; + return post(url, "application/json", data_str); +} + +HttpResponse 
HttpClient::put(const std::string& url, const std::string& content_type, const std::string& data) { + CURL* curl_put = dupHandle(curl, pkcs11_key); + curl_slist* req_headers = curl_slist_dup(headers); + req_headers = curl_slist_append(req_headers, (std::string("Content-Type: ") + content_type).c_str()); + curlEasySetoptWrapper(curl_put, CURLOPT_HTTPHEADER, req_headers); curlEasySetoptWrapper(curl_put, CURLOPT_URL, url.c_str()); - std::string data_str = Json::FastWriter().write(data); - curlEasySetoptWrapper(curl_put, CURLOPT_POSTFIELDS, data_str.c_str()); + curlEasySetoptWrapper(curl_put, CURLOPT_POSTFIELDS, data.c_str()); curlEasySetoptWrapper(curl_put, CURLOPT_CUSTOMREQUEST, "PUT"); - LOG_TRACE << "put request body:" << data; HttpResponse result = perform(curl_put, RETRY_TIMES, HttpInterface::kPutRespLimit); curl_easy_cleanup(curl_put); + curl_slist_free_all(req_headers); return result; } +HttpResponse HttpClient::put(const std::string& url, const Json::Value& data) { + std::string data_str = Utils::jsonToCanonicalStr(data); + LOG_TRACE << "put request body:" << data; + return put(url, "application/json", data_str); +} + +// NOLINTNEXTLINE(misc-no-recursion) HttpResponse HttpClient::perform(CURL* curl_handler, int retry_times, int64_t size_limit) { + if (size_limit >= 0) { + // it will only take effect if the server declares the size in advance, + // writeString callback takes care of the other case + curlEasySetoptWrapper(curl_handler, CURLOPT_MAXFILESIZE_LARGE, size_limit); + } + curlEasySetoptWrapper(curl_handler, CURLOPT_LOW_SPEED_TIME, speed_limit_time_interval_); + curlEasySetoptWrapper(curl_handler, CURLOPT_LOW_SPEED_LIMIT, speed_limit_bytes_per_sec_); + WriteStringArg response_arg; response_arg.limit = size_limit; curlEasySetoptWrapper(curl_handler, CURLOPT_WRITEDATA, static_cast(&response_arg)); + ResponseHeaders resp_headers(response_header_names_); + if (!resp_headers.header_names.empty()) { + curlEasySetoptWrapper(curl_handler, CURLOPT_HEADERDATA, 
&resp_headers); + curlEasySetoptWrapper(curl_handler, CURLOPT_HEADERFUNCTION, header_callback); + } CURLcode result = curl_easy_perform(curl_handler); long http_code; // NOLINT(google-runtime-int) curl_easy_getinfo(curl_handler, CURLINFO_RESPONSE_CODE, &http_code); - HttpResponse response(response_arg.out, http_code, result, (result != CURLE_OK) ? curl_easy_strerror(result) : ""); + HttpResponse response(response_arg.out, http_code, result, (result != CURLE_OK) ? curl_easy_strerror(result) : "", + std::move(resp_headers.headers)); if (response.curl_code != CURLE_OK || response.http_status_code >= 500) { std::ostringstream error_message; error_message << "curl error " << response.curl_code << " (http code " << response.http_status_code @@ -203,6 +256,7 @@ HttpResponse HttpClient::perform(CURL* curl_handler, int retry_times, int64_t si LOG_ERROR << error_message.str(); if (retry_times != 0) { sleep(1); + // NOLINTNEXTLINE(misc-no-recursion) response = perform(curl_handler, --retry_times, size_limit); } } @@ -219,7 +273,7 @@ HttpResponse HttpClient::download(const std::string& url, curl_write_callback wr std::future HttpClient::downloadAsync(const std::string& url, curl_write_callback write_cb, curl_xferinfo_callback progress_cb, void* userp, curl_off_t from, CurlHandler* easyp) { - CURL* curl_download = Utils::curlDupHandleWrapper(curl, pkcs11_key); + CURL* curl_download = dupHandle(curl, pkcs11_key); CurlHandler curlp = CurlHandler(curl_download, curl_easy_cleanup); @@ -227,9 +281,9 @@ std::future HttpClient::downloadAsync(const std::string& url, curl *easyp = curlp; } + curlEasySetoptWrapper(curl_download, CURLOPT_HTTPHEADER, headers); curlEasySetoptWrapper(curl_download, CURLOPT_URL, url.c_str()); curlEasySetoptWrapper(curl_download, CURLOPT_HTTPGET, 1L); - curlEasySetoptWrapper(curl_download, CURLOPT_FOLLOWLOCATION, 1L); curlEasySetoptWrapper(curl_download, CURLOPT_WRITEFUNCTION, write_cb); curlEasySetoptWrapper(curl_download, CURLOPT_WRITEDATA, userp); if 
(progress_cb != nullptr) { @@ -245,11 +299,17 @@ std::future HttpClient::downloadAsync(const std::string& url, curl std::promise resp_promise; auto resp_future = resp_promise.get_future(); std::thread( - [curlp](std::promise promise) { + [curlp, this](std::promise promise) { + ResponseHeaders resp_headers(response_header_names_); + if (!resp_headers.header_names.empty()) { + curlEasySetoptWrapper(curlp.get(), CURLOPT_HEADERDATA, &resp_headers); + curlEasySetoptWrapper(curlp.get(), CURLOPT_HEADERFUNCTION, header_callback); + } CURLcode result = curl_easy_perform(curlp.get()); long http_code; // NOLINT(google-runtime-int) curl_easy_getinfo(curlp.get(), CURLINFO_RESPONSE_CODE, &http_code); - HttpResponse response("", http_code, result, (result != CURLE_OK) ? curl_easy_strerror(result) : ""); + HttpResponse response("", http_code, result, (result != CURLE_OK) ? curl_easy_strerror(result) : "", + std::move(resp_headers.headers)); promise.set_value(response); }, std::move(resp_promise)) @@ -257,4 +317,77 @@ std::future HttpClient::downloadAsync(const std::string& url, curl return resp_future; } +bool HttpClient::updateHeader(const std::string& name, const std::string& value) { + curl_slist* item = headers; + std::string lookfor(name + ": "); + + while (item != nullptr) { + if (strncmp(lookfor.c_str(), item->data, lookfor.length()) == 0) { + free(item->data); // NOLINT(cppcoreguidelines-no-malloc, hicpp-no-malloc) + lookfor += value; + item->data = strdup(lookfor.c_str()); + return true; + } + item = item->next; + } + return false; +} + +void HttpClient::timeout(int64_t ms) { + // curl_easy_setopt() takes a 'long' be very sure that we are passing + // whatever the platform ABI thinks is a long, while keeping the external + // interface a clang-tidy preferred int64 + auto ms_long = static_cast(ms); // NOLINT(google-runtime-int) + curlEasySetoptWrapper(curl, CURLOPT_TIMEOUT_MS, ms_long); + curlEasySetoptWrapper(curl, CURLOPT_CONNECTTIMEOUT_MS, ms_long); +} + +curl_slist* 
HttpClient::curl_slist_dup(curl_slist* sl) { + curl_slist* new_list = nullptr; + + for (curl_slist* item = sl; item != nullptr; item = item->next) { + new_list = curl_slist_append(new_list, item->data); + } + + return new_list; +} + +/* Locking for curl share instance */ +static void curl_share_lock_cb(CURL* handle, curl_lock_data data, curl_lock_access access, void* userptr) { + (void)handle; + (void)access; + auto* mutexes = static_cast*>(userptr); + mutexes->at(data).lock(); +} + +static void curl_share_unlock_cb(CURL* handle, curl_lock_data data, void* userptr) { + (void)handle; + auto* mutexes = static_cast*>(userptr); + mutexes->at(data).unlock(); +} + +void HttpClientWithShare::initCurlShare() { + share_ = curl_share_init(); + if (share_ == nullptr) { + throw std::runtime_error("Could not initialize share"); + } + + curl_share_setopt(share_, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION); + curl_share_setopt(share_, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); + curl_share_setopt(share_, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT); + curl_share_setopt(share_, CURLSHOPT_LOCKFUNC, curl_share_lock_cb); + curl_share_setopt(share_, CURLSHOPT_UNLOCKFUNC, curl_share_unlock_cb); + curl_share_setopt(share_, CURLSHOPT_USERDATA, &curl_share_mutexes); +} + +HttpClientWithShare::HttpClientWithShare(const std::vector* extra_headers, + const std::set* response_header_names) + : HttpClient(extra_headers, response_header_names) { + initCurlShare(); +} +HttpClientWithShare::HttpClientWithShare(const std::string& socket) : HttpClient(socket) { initCurlShare(); } + +HttpClientWithShare::HttpClientWithShare(const HttpClientWithShare& curl_in) : HttpClient(curl_in) { initCurlShare(); } + +HttpClientWithShare::~HttpClientWithShare() { curl_share_cleanup(share_); } // vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/libaktualizr/http/httpclient.h b/src/libaktualizr/http/httpclient.h index 3d8470e856..b5a82fe8d6 100644 --- a/src/libaktualizr/http/httpclient.h +++ 
b/src/libaktualizr/http/httpclient.h @@ -3,14 +3,13 @@ #include #include +#include #include #include "gtest/gtest_prod.h" #include "json/json.h" #include "httpinterface.h" -#include "logging/logging.h" -#include "utilities/utils.h" /** * Helper class to manage curl_global_init/curl_global_cleanup calls @@ -19,19 +18,26 @@ class CurlGlobalInitWrapper { public: CurlGlobalInitWrapper() { curl_global_init(CURL_GLOBAL_DEFAULT); } ~CurlGlobalInitWrapper() { curl_global_cleanup(); } - CurlGlobalInitWrapper &operator=(const CurlGlobalInitWrapper &) = delete; CurlGlobalInitWrapper(const CurlGlobalInitWrapper &) = delete; - CurlGlobalInitWrapper &operator=(CurlGlobalInitWrapper &&) = delete; CurlGlobalInitWrapper(CurlGlobalInitWrapper &&) = delete; + CurlGlobalInitWrapper &operator=(const CurlGlobalInitWrapper &) = delete; + CurlGlobalInitWrapper &operator=(CurlGlobalInitWrapper &&) = delete; }; class HttpClient : public HttpInterface { public: - HttpClient(const std::vector *extra_headers = nullptr); - HttpClient(const HttpClient & /*curl_in*/); + explicit HttpClient(const std::vector *extra_headers = nullptr, + const std::set *response_header_names = nullptr); + explicit HttpClient(const std::string &socket); + HttpClient(const HttpClient &curl_in); // non-default! 
~HttpClient() override; + HttpClient(HttpClient &&) = default; + HttpClient &operator=(const HttpClient &) = delete; + HttpClient &operator=(HttpClient &&) = default; HttpResponse get(const std::string &url, int64_t maxsize) override; + HttpResponse post(const std::string &url, const std::string &content_type, const std::string &data) override; HttpResponse post(const std::string &url, const Json::Value &data) override; + HttpResponse put(const std::string &url, const std::string &content_type, const std::string &data) override; HttpResponse put(const std::string &url, const Json::Value &data) override; HttpResponse download(const std::string &url, curl_write_callback write_cb, curl_xferinfo_callback progress_cb, @@ -41,24 +47,21 @@ class HttpClient : public HttpInterface { CurlHandler *easyp) override; void setCerts(const std::string &ca, CryptoSource ca_source, const std::string &cert, CryptoSource cert_source, const std::string &pkey, CryptoSource pkey_source) override; + bool updateHeader(const std::string &name, const std::string &value); + void timeout(int64_t ms); private: FRIEND_TEST(GetTest, download_speed_limit); - /** - * These are here to catch a common programming error where a Json::Value is - * implicitly constructed from a std::string. By having an private overload - * that takes string (and with no implementation), this will fail during - * compilation. 
- */ - HttpResponse post(const std::string &url, std::string data); - HttpResponse put(const std::string &url, std::string data); - static CurlGlobalInitWrapper manageCurlGlobalInit_; + static const CurlGlobalInitWrapper manageCurlGlobalInit_; CURL *curl; curl_slist *headers; HttpResponse perform(CURL *curl_handler, int retry_times, int64_t size_limit); + static curl_slist *curl_slist_dup(curl_slist *sl); + virtual CURL *dupHandle(CURL *const curl_in, const bool using_pkcs11) { + return Utils::curlDupHandleWrapper(curl_in, using_pkcs11, nullptr); + } - static CURLcode sslCtxFunction(CURL *handle, void *sslctx, void *parm); std::unique_ptr tls_ca_file; std::unique_ptr tls_cert_file; std::unique_ptr tls_pkey_file; @@ -74,5 +77,29 @@ class HttpClient : public HttpInterface { } bool pkcs11_key{false}; bool pkcs11_cert{false}; + std::set response_header_names_; }; + +class HttpClientWithShare : public HttpClient { + public: + explicit HttpClientWithShare(const std::vector *extra_headers = nullptr, + const std::set *response_header_names = nullptr); + explicit HttpClientWithShare(const std::string &socket); + HttpClientWithShare(const HttpClientWithShare &curl_in); // non-default! 
+ ~HttpClientWithShare() override; + HttpClientWithShare(HttpClientWithShare &&) = delete; + HttpClientWithShare &operator=(const HttpClientWithShare &) = delete; + HttpClientWithShare &operator=(HttpClientWithShare &&) = delete; + + protected: + CURL *dupHandle(CURL *const curl_in, const bool using_pkcs11) override { + return Utils::curlDupHandleWrapper(curl_in, using_pkcs11, share_); + } + void initCurlShare(); + + private: + CURLSH *share_{nullptr}; + std::array curl_share_mutexes; +}; + #endif diff --git a/src/libaktualizr/http/httpclient_test.cc b/src/libaktualizr/http/httpclient_test.cc index 46a2b42339..ac681e145f 100644 --- a/src/libaktualizr/http/httpclient_test.cc +++ b/src/libaktualizr/http/httpclient_test.cc @@ -9,8 +9,8 @@ #include "json/json.h" #include "http/httpclient.h" +#include "libaktualizr/types.h" #include "test_utils.h" -#include "utilities/types.h" #include "utilities/utils.h" static std::string server = "http://127.0.0.1:"; @@ -100,7 +100,22 @@ TEST(HttpClient, user_agent) { } } -// TODO: add tests for HttpClient::download +TEST(Headers, update_header) { + std::vector headers = {"Authorization: Bearer bad"}; + HttpClient http(&headers); + + ASSERT_FALSE(http.updateHeader("NOSUCHHEADER", "foo")); + + std::string path = "/auth_call"; + std::string body = http.get(server + path, HttpInterface::kNoLimit).body; + EXPECT_EQ(body, "{}"); + + ASSERT_TRUE(http.updateHeader("Authorization", "Bearer token")); + Json::Value response = http.get(server + path, HttpInterface::kNoLimit).getJson(); + EXPECT_EQ(response["status"].asString(), "good"); +} + +// TODO(OTA-4546): add tests for HttpClient::download #ifndef __NO_MAIN__ int main(int argc, char** argv) { diff --git a/src/libaktualizr/http/httpinterface.h b/src/libaktualizr/http/httpinterface.h index 10e43457bb..4cb46642db 100644 --- a/src/libaktualizr/http/httpinterface.h +++ b/src/libaktualizr/http/httpinterface.h @@ -8,7 +8,8 @@ #include #include "json/json.h" -#include "utilities/types.h" 
+#include "libaktualizr/types.h" +#include "logging/logging.h" #include "utilities/utils.h" using CurlHandler = std::shared_ptr; @@ -16,22 +17,25 @@ using CurlHandler = std::shared_ptr; struct HttpResponse { HttpResponse() = default; HttpResponse(std::string body_in, const long http_status_code_in, // NOLINT(google-runtime-int) - CURLcode curl_code_in, std::string error_message_in) + CURLcode curl_code_in, std::string error_message_in, + std::unordered_map &&headers_in = {}) : body(std::move(body_in)), http_status_code(http_status_code_in), curl_code(curl_code_in), - error_message(std::move(error_message_in)) {} + error_message(std::move(error_message_in)), + headers{headers_in} {} std::string body; long http_status_code{0}; // NOLINT(google-runtime-int) CURLcode curl_code{CURLE_OK}; std::string error_message; - bool isOk() { return (curl_code == CURLE_OK && http_status_code >= 200 && http_status_code < 400); } - bool wasInterrupted() { return curl_code == CURLE_ABORTED_BY_CALLBACK; }; - std::string getStatusStr() { + std::unordered_map headers; + bool isOk() const { return (curl_code == CURLE_OK && http_status_code >= 200 && http_status_code < 400); } + bool wasInterrupted() const { return curl_code == CURLE_ABORTED_BY_CALLBACK; }; + std::string getStatusStr() const { return std::to_string(curl_code) + " " + error_message + " HTTP " + std::to_string(http_status_code); } - Json::Value getJson() { return Utils::parseJSON(body); } + Json::Value getJson() const { return Utils::parseJSON(body); } }; class HttpInterface { @@ -39,7 +43,9 @@ class HttpInterface { HttpInterface() = default; virtual ~HttpInterface() = default; virtual HttpResponse get(const std::string &url, int64_t maxsize) = 0; + virtual HttpResponse post(const std::string &url, const std::string &content_type, const std::string &data) = 0; virtual HttpResponse post(const std::string &url, const Json::Value &data) = 0; + virtual HttpResponse put(const std::string &url, const std::string &content_type, 
const std::string &data) = 0; virtual HttpResponse put(const std::string &url, const Json::Value &data) = 0; virtual HttpResponse download(const std::string &url, curl_write_callback write_cb, @@ -52,6 +58,12 @@ class HttpInterface { static constexpr int64_t kNoLimit = 0; // no limit the size of downloaded data static constexpr int64_t kPostRespLimit = 64 * 1024; static constexpr int64_t kPutRespLimit = 64 * 1024; + + protected: + HttpInterface(const HttpInterface &) = default; + HttpInterface(HttpInterface &&) = default; + HttpInterface &operator=(const HttpInterface &) = default; + HttpInterface &operator=(HttpInterface &&) = default; }; #endif // HTTPINTERFACE_H_ diff --git a/src/libaktualizr/isotp_conn/CMakeLists.txt b/src/libaktualizr/isotp_conn/CMakeLists.txt deleted file mode 100644 index 17ade9cc8e..0000000000 --- a/src/libaktualizr/isotp_conn/CMakeLists.txt +++ /dev/null @@ -1,20 +0,0 @@ -set(ISOTP_PATH_PREFIX ${PROJECT_SOURCE_DIR}/third_party/isotp-c/src) -set(BITFIELD_PATH_PREFIX ${ISOTP_PATH_PREFIX}/../deps/bitfield-c/src) - -set(ISOTP_SOURCES ${ISOTP_PATH_PREFIX}/isotp/isotp.c - ${ISOTP_PATH_PREFIX}/isotp/send.c - ${ISOTP_PATH_PREFIX}/isotp/receive.c - ${BITFIELD_PATH_PREFIX}/bitfield/8byte.c - ${BITFIELD_PATH_PREFIX}/bitfield/bitarray.c - ${BITFIELD_PATH_PREFIX}/bitfield/bitfield.c) - -set_source_files_properties(${ISOTP_SOURCES} PROPERTIES COMPILE_FLAGS "-Wno-sign-conversion -Wno-conversion -Wno-error=unused-parameter") - -set(SOURCES isotp_conn.cc isotp_allocate.cc) - -set(HEADERS isotp_conn.h) - -add_library(isotp_conn OBJECT ${SOURCES} ${ISOTP_SOURCES}) -target_include_directories(isotp_conn PUBLIC ${ISOTP_PATH_PREFIX} ${BITFIELD_PATH_PREFIX}) - -aktualizr_source_file_checks(${SOURCES} ${HEADERS}) diff --git a/src/libaktualizr/isotp_conn/isotp_allocate.cc b/src/libaktualizr/isotp_conn/isotp_allocate.cc deleted file mode 100644 index 79fe5275d6..0000000000 --- a/src/libaktualizr/isotp_conn/isotp_allocate.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include 
-#include -extern "C" { -uint8_t* allocate(size_t size) { return static_cast(malloc((sizeof(uint8_t)) * size)); } - -void free_allocated(uint8_t* data) { free(data); } -} diff --git a/src/libaktualizr/isotp_conn/isotp_conn.cc b/src/libaktualizr/isotp_conn/isotp_conn.cc deleted file mode 100644 index 5850d3a27d..0000000000 --- a/src/libaktualizr/isotp_conn/isotp_conn.cc +++ /dev/null @@ -1,205 +0,0 @@ -#include "isotp_conn.h" -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include - -#include - -#include "logging/logging.h" - -IsoTpSendRecv::IsoTpSendRecv(std::string can_iface_, uint16_t canaddr_rx_, uint16_t canaddr_tx_) - : can_iface{std::move(can_iface_)}, canaddr_rx{canaddr_rx_}, canaddr_tx{canaddr_tx_} { - can_socket = socket(PF_CAN, SOCK_RAW, CAN_RAW); - - if (can_socket < -1) { - throw std::runtime_error("Unable to open socket"); - } - - struct can_filter filter {}; - filter.can_id = canaddr_rx & 0x7FF; - filter.can_mask = 0x7FF; - setsockopt(can_socket, SOL_CAN_RAW, CAN_RAW_FILTER, &filter, sizeof(filter)); - - struct ifreq ifr {}; - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - memcpy(ifr.ifr_name, can_iface.c_str(), IFNAMSIZ); - - if (ioctl(can_socket, SIOCGIFINDEX, &ifr) != 0) { - throw std::runtime_error("Unable to get interface index"); - } - - struct sockaddr_can addr {}; - addr.can_family = AF_CAN; - addr.can_ifindex = ifr.ifr_ifindex; // NOLINT - - if (bind(can_socket, reinterpret_cast(&addr), sizeof(addr)) < 0) { - throw std::runtime_error("Unable to bind socket"); - } - - isotp_shims = isotp_init_shims(nullptr, canSend, nullptr, this); -} - -bool IsoTpSendRecv::canSend(uint32_t arbitration_id, const uint8_t* data, uint8_t size, void* private_data) { - auto* instance = static_cast(private_data); - - if ((instance == nullptr) || size > 8) { - return false; - } - - LOG_TRACE << "Sending CAN message AF: 0x" << std::hex << arbitration_id << "; Data:"; - LOG_TRACE << " " << 
boost::algorithm::hex(std::string(reinterpret_cast(data), size)); - - int can_socket = instance->can_socket; - - struct can_frame frame {}; - - frame.can_id = arbitration_id; - frame.can_dlc = size; - memcpy(frame.data, data, size); - - ssize_t res = write(can_socket, &frame, sizeof(frame)); - if (res < 0) { - LOG_ERROR << "CAN write error: " << strerror(errno); - return false; - } - if (res != sizeof(frame)) { - LOG_ERROR << "CAN write error: " << res << " bytes of " << sizeof(frame) << " were sent"; - return false; - } - return true; -} - -bool IsoTpSendRecv::Send(const std::string& out) { - IsoTpMessage message_tx = isotp_new_send_message(canaddr_tx, reinterpret_cast(out.c_str()), - static_cast(out.length())); - IsoTpSendHandle send_handle = isotp_send(&isotp_shims, &message_tx, nullptr); - if (send_handle.completed) { - if (send_handle.success) { - return true; - } - LOG_ERROR << "ISO/TP message send failed"; - return false; - } else { - while (true) { - fd_set read_set; - FD_ZERO(&read_set); - FD_SET(can_socket, &read_set); - - // struct timeval timeout = {0, 20000}; // 20 ms - // if (select((can_socket + 1), &read_set, nullptr, nullptr, &timeout) >= 0) { - if (select((can_socket + 1), &read_set, nullptr, nullptr, nullptr) >= 0) { - if (FD_ISSET(can_socket, &read_set)) { - struct can_frame f {}; - ssize_t ret = read(can_socket, &f, sizeof(f)); - if (ret < 0) { - std::cerr << "Error receiving CAN frame" << std::endl; - return false; - } - - LOG_TRACE << "Reveived CAN message in Send method AF: 0x" << std::hex << f.can_id << "; Data:"; - LOG_TRACE << " " << boost::algorithm::hex(std::string(reinterpret_cast(f.data), f.can_dlc)); - - if (!isotp_receive_flowcontrol(&isotp_shims, &send_handle, static_cast(f.can_id), f.data, - f.can_dlc)) { - std::cerr << "IsoTp receiving error" << std::endl; - return false; - } - - while (send_handle.to_send != 0) { - if (send_handle.gap_ms != 0) { - std::this_thread::sleep_for(std::chrono::milliseconds(send_handle.gap_ms)); - } - 
if (send_handle.gap_us != 0) { - std::this_thread::sleep_for(std::chrono::microseconds(send_handle.gap_us)); - } - - if (!isotp_continue_send(&isotp_shims, &send_handle)) { - LOG_ERROR << "IsoTp sending error"; - return false; - } - if (send_handle.completed) { - // Wait before (potentially) sending another packet - if (send_handle.gap_ms != 0) { - std::this_thread::sleep_for(std::chrono::milliseconds(send_handle.gap_ms)); - } - if (send_handle.gap_us != 0) { - std::this_thread::sleep_for(std::chrono::microseconds(send_handle.gap_us)); - } - - if (send_handle.success) { - return true; - } - LOG_ERROR << "IsoTp send failed"; - return false; - } - } - - } else { - LOG_TRACE << "Timeout on CAN socket"; - return true; - } - if (send_handle.completed) { - break; - } - } else { - std::cerr << "Select failed" << std::endl; - return false; - } - } - } - return false; -} - -bool IsoTpSendRecv::Recv(std::string* in) { - IsoTpReceiveHandle recv_handle = isotp_receive(&isotp_shims, canaddr_tx, canaddr_rx, nullptr); - - while (true) { - fd_set read_set; - FD_ZERO(&read_set); - FD_SET(can_socket, &read_set); - - // struct timeval timeout = {0, 2000000}; // 20 ms - // if (select((can_socket + 1), &read_set, nullptr, nullptr, &timeout) >= 0) { - if (select((can_socket + 1), &read_set, nullptr, nullptr, nullptr) >= 0) { - if (FD_ISSET(can_socket, &read_set)) { - struct can_frame f {}; - ssize_t ret = read(can_socket, &f, sizeof(f)); - if (ret < 0) { - std::cerr << "Error receiving CAN frame" << std::endl; - return false; - } - - LOG_TRACE << "Reveived CAN message in Recv method AF: 0x" << std::hex << f.can_id << "; Data:"; - LOG_TRACE << " " << boost::algorithm::hex(std::string(reinterpret_cast(f.data), f.can_dlc)); - // std::this_thread::sleep_for(std::chrono::milliseconds(10)); // hack for RIOT to start waiting for flow - // control - IsoTpMessage message_rx = isotp_continue_receive(&isotp_shims, &recv_handle, f.can_id, f.data, f.can_dlc); - if (message_rx.completed && 
recv_handle.completed) { - if (!recv_handle.success) { - std::cerr << "IsoTp receiving error" << std::endl; - return false; - } - *in = std::string(reinterpret_cast(message_rx.payload), static_cast(message_rx.size)); - return true; - } - } else { - LOG_TRACE << "Timeout on CAN socket"; - *in = ""; - return false; - } - } else { - LOG_ERROR << "Select failed"; - return false; - } - } -} diff --git a/src/libaktualizr/isotp_conn/isotp_conn.h b/src/libaktualizr/isotp_conn/isotp_conn.h deleted file mode 100644 index ea191d369c..0000000000 --- a/src/libaktualizr/isotp_conn/isotp_conn.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef UPTANE_ISOTP_SEND_RECV_H_ -#define UPTANE_ISOTP_SEND_RECV_H_ - -#include -#include -#include "isotp/isotp.h" - -class IsoTpSendRecv { - public: - IsoTpSendRecv(std::string can_iface_, uint16_t canaddr_rx_, uint16_t canaddr_tx_); - bool Send(const std::string& out); - bool SendRecv(const std::string& out, std::string* in) { return Send(out) && Recv(in); } - - private: - std::string can_iface; - uint16_t canaddr_rx; - uint16_t canaddr_tx; - int can_socket; - IsoTpShims isotp_shims{}; - - bool Recv(std::string* in); - static bool canSend(uint32_t arbitration_id, const uint8_t* data, uint8_t size, void* private_data); -}; - -#endif // UPTANE_ISOTP_SEND_RECV_H_ diff --git a/src/libaktualizr/logging/CMakeLists.txt b/src/libaktualizr/logging/CMakeLists.txt index 43cab7bf12..bf25d58d34 100644 --- a/src/libaktualizr/logging/CMakeLists.txt +++ b/src/libaktualizr/logging/CMakeLists.txt @@ -1,12 +1,5 @@ -set(SOURCES logging.cc logging_config.cc) - -if(ANDROID) - list(APPEND SOURCES android_log_sink.cc) -else() - list(APPEND SOURCES default_log_sink.cc) -endif() - -set(HEADERS logging_config.h logging.h) +set(SOURCES logging.cc logging_config.cc default_log_sink.cc) +set(HEADERS logging.h) add_library(logging OBJECT ${SOURCES}) -aktualizr_source_file_checks(logging.cc logging_config.cc android_log_sink.cc default_log_sink.cc ${HEADERS}) 
+aktualizr_source_file_checks(${SOURCES} ${HEADERS}) diff --git a/src/libaktualizr/logging/android_log_sink.cc b/src/libaktualizr/logging/android_log_sink.cc deleted file mode 100644 index 0232115e15..0000000000 --- a/src/libaktualizr/logging/android_log_sink.cc +++ /dev/null @@ -1,23 +0,0 @@ -#include -#include -#include - -namespace log = boost::log; - -class android_log_sink : public log::sinks::basic_sink_backend { - public: - explicit android_log_sink() {} - - void consume(log::record_view const& rec) { - const auto& rec_message_attr = rec[log::aux::default_attribute_names::message()]; - int log_priority = android_LogPriority::ANDROID_LOG_VERBOSE + - rec[log::aux::default_attribute_names::severity()].extract_or_default(0); - __android_log_write(log_priority, "aktualizr", rec_message_attr.extract_or_default(std::string("N/A")).c_str()); - } -}; - -void logger_init_sink(bool use_colors = false) { - (void)use_colors; - typedef log::sinks::synchronous_sink android_log_sink_t; - log::core::get()->add_sink(boost::shared_ptr(new android_log_sink_t())); -} diff --git a/src/libaktualizr/logging/default_log_sink.cc b/src/libaktualizr/logging/default_log_sink.cc index b0c150e512..54819a5dc7 100644 --- a/src/libaktualizr/logging/default_log_sink.cc +++ b/src/libaktualizr/logging/default_log_sink.cc @@ -30,7 +30,11 @@ static void color_fmt(boost::log::record_view const& rec, boost::log::formatting } void logger_init_sink(bool use_colors = false) { - auto sink = boost::log::add_console_log(std::cout, boost::log::keywords::format = "%Message%", + auto* stream = &std::cerr; + if (getenv("LOG_STDERR") == nullptr) { + stream = &std::cout; + } + auto sink = boost::log::add_console_log(*stream, boost::log::keywords::format = "%Message%", boost::log::keywords::auto_flush = true); if (use_colors) { sink->set_formatter(&color_fmt); diff --git a/src/libaktualizr/logging/logging.cc b/src/libaktualizr/logging/logging.cc index 305949192e..bc4918df90 100644 --- 
a/src/libaktualizr/logging/logging.cc +++ b/src/libaktualizr/logging/logging.cc @@ -1,8 +1,13 @@ #include "logging.h" +#include +#include + +#include "libaktualizr/config.h" + using boost::log::trivial::severity_level; -static severity_level gLoggingThreshold; +static severity_level gLoggingThreshold; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) extern void logger_init_sink(bool use_colors = false); diff --git a/src/libaktualizr/logging/logging.h b/src/libaktualizr/logging/logging.h index 66b9955c3e..297d59e293 100644 --- a/src/libaktualizr/logging/logging.h +++ b/src/libaktualizr/logging/logging.h @@ -1,11 +1,10 @@ #ifndef SOTA_CLIENT_TOOLS_LOGGING_H_ #define SOTA_CLIENT_TOOLS_LOGGING_H_ -#include -#include #include +#include -#include "logging_config.h" +struct LoggerConfig; /** Log an unrecoverable error */ #define LOG_FATAL BOOST_LOG_TRIVIAL(fatal) diff --git a/src/libaktualizr/logging/logging_config.cc b/src/libaktualizr/logging/logging_config.cc index 965773a0b9..ff1c35fcbf 100644 --- a/src/libaktualizr/logging/logging_config.cc +++ b/src/libaktualizr/logging/logging_config.cc @@ -1,6 +1,6 @@ #include -#include "logging_config.h" +#include "libaktualizr/config.h" #include "utilities/config_utils.h" void LoggerConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { diff --git a/src/libaktualizr/logging/logging_config.h b/src/libaktualizr/logging/logging_config.h deleted file mode 100644 index 0c203deaf8..0000000000 --- a/src/libaktualizr/logging/logging_config.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef LOGGING_CONFIG_H -#define LOGGING_CONFIG_H - -#include -#include - -struct LoggerConfig { - int loglevel{2}; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -#endif // LOGGING_CONFIG diff --git a/src/libaktualizr/package_manager/CMakeLists.txt b/src/libaktualizr/package_manager/CMakeLists.txt index 7b212851f4..8e716f47d3 100644 --- 
a/src/libaktualizr/package_manager/CMakeLists.txt +++ b/src/libaktualizr/package_manager/CMakeLists.txt @@ -2,10 +2,7 @@ set(SOURCES packagemanagerfactory.cc packagemanagerfake.cc packagemanagerinterface.cc) -set(HEADERS packagemanagerconfig.h - packagemanagerfactory.h - packagemanagerfake.h - packagemanagerinterface.h) +set(HEADERS packagemanagerfake.h) add_library(package_manager OBJECT ${SOURCES}) aktualizr_source_file_checks(${SOURCES} packagemanagerconfig.cc ${HEADERS}) @@ -14,23 +11,9 @@ target_sources(config PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/packagemanagerconfig.c add_aktualizr_test(NAME packagemanagerfake SOURCES packagemanagerfake_test.cc LIBRARIES PUBLIC uptane_generator_lib) -# Debian backend -if(BUILD_DEB) - set_property(SOURCE packagemanagerfactory.cc packagemanagerfactory_test.cc PROPERTY COMPILE_DEFINITIONS BUILD_DEB) - target_sources(package_manager PRIVATE debianmanager.cc) - add_executable(t_packagemanager_deb EXCLUDE_FROM_ALL debianmanager_test.cc) - add_dependencies(build_tests t_packagemanager_deb) - target_link_libraries(t_packagemanager_deb aktualizr_static_lib ${TEST_LIBS}) - - add_test(NAME test_packagemanager_deb COMMAND ${PROJECT_SOURCE_DIR}/tests/run_debian_tests.sh ${CMAKE_CURRENT_BINARY_DIR}/t_packagemanager_deb - ${PROJECT_SOURCE_DIR}/tests/test_data/fake_dpkg) - -endif(BUILD_DEB) -aktualizr_source_file_checks(debianmanager.cc debianmanager.h debianmanager_test.cc) - # OSTree backend if(BUILD_OSTREE) - target_sources(package_manager PRIVATE ostreemanager.cc ostreereposync.cc) + target_sources(package_manager PRIVATE ostreemanager.cc) target_include_directories(package_manager PUBLIC ${LIBOSTREE_INCLUDE_DIRS}) add_custom_target(make_ostree_sysroot @@ -40,29 +23,19 @@ if(BUILD_OSTREE) add_aktualizr_test(NAME ostreemanager SOURCES ostreemanager_test.cc PROJECT_WORKING_DIRECTORY ARGS ${PROJECT_BINARY_DIR}/ostree_repo) - - if(BUILD_DOCKERAPP) - target_sources(package_manager PRIVATE dockerappmanager.cc) - add_aktualizr_test(NAME 
dockerapp SOURCES dockerappmanager_test.cc PROJECT_WORKING_DIRECTORY - ARGS ${PROJECT_BINARY_DIR}/ostree_repo "$") - endif(BUILD_DOCKERAPP) endif(BUILD_OSTREE) +add_aktualizr_test(NAME packagemanagerconfig SOURCES packagemanagerconfig_test.cc NO_VALGRIND) add_aktualizr_test(NAME packagemanager_factory SOURCES packagemanagerfactory_test.cc ARGS ${PROJECT_BINARY_DIR}/ostree_repo) add_aktualizr_test(NAME fetcher SOURCES fetcher_test.cc ARGS PROJECT_WORKING_DIRECTORY LIBRARIES PUBLIC uptane_generator_lib) add_aktualizr_test(NAME fetcher_death SOURCES fetcher_death_test.cc NO_VALGRIND ARGS PROJECT_WORKING_DIRECTORY) -aktualizr_source_file_checks(fetcher_death_test.cc fetcher_test.cc) - -aktualizr_source_file_checks(packagemanagerfake_test.cc packagemanagerfactory_test.cc ostreemanager_test.cc) - -aktualizr_source_file_checks(ostreemanager.cc ostreereposync.cc - ostreemanager.h ostreereposync.h) - -aktualizr_source_file_checks(androidmanager.cc androidmanager.h) -aktualizr_source_file_checks(dockerappmanager.cc dockerappmanager.h dockerappmanager_test.cc) - -if(ANDROID) - target_sources(package_manager PRIVATE androidmanager.cc) -endif(ANDROID) +aktualizr_source_file_checks(fetcher_death_test.cc + fetcher_test.cc + packagemanagerconfig_test.cc + packagemanagerfake_test.cc + packagemanagerfactory_test.cc + ostreemanager_test.cc + ostreemanager.cc + ostreemanager.h) diff --git a/src/libaktualizr/package_manager/androidmanager.cc b/src/libaktualizr/package_manager/androidmanager.cc deleted file mode 100644 index 6aa364db55..0000000000 --- a/src/libaktualizr/package_manager/androidmanager.cc +++ /dev/null @@ -1,127 +0,0 @@ -#include -#include -#include - -#include "androidmanager.h" - -#include "utilities/utils.h" - -#include -#include -#include -#include - -namespace qi = boost::spirit::qi; -namespace fs = boost::filesystem; - -const std::string AndroidManager::data_ota_package_dir_ = "/data/ota_package"; - -Json::Value AndroidManager::getInstalledPackages() const { - using 
boost::phoenix::copy; - using qi::_1; - using qi::char_; - - std::string pm_output; - Json::Value packages(Json::arrayValue); - if (0 != Utils::shell("pm list packages --show-versioncode", &pm_output)) { - return packages; - } - - qi::rule()> char_seq = qi::lexeme[*(char_ - ' ')]; - - std::istringstream pv_lines(pm_output); - for (std::string line; std::getline(pv_lines, line);) { - std::string p, v; - if (qi::parse(line.begin(), line.end(), - ("package:" >> char_seq[copy(_1, std::back_inserter(p))] >> ' ' >> "versionCode:" >> - char_seq[copy(_1, std::back_inserter(v))]))) { - Json::Value package; - package["name"] = p; - package["version"] = v; - packages.append(package); - } - } - return packages; -} - -Uptane::Target AndroidManager::getCurrent() const { - using boost::phoenix::push_front; - using boost::spirit::ascii::xdigit; - using qi::_1; - - std::string getprop_output; - if (0 == Utils::shell("getprop ota.last_installed_package_file", &getprop_output)) { - std::forward_list hash; - qi::phrase_parse(getprop_output.crbegin(), getprop_output.crend(), - *(xdigit[push_front(boost::phoenix::ref(hash), _1)]), boost::spirit::ascii::cntrl); - std::vector installed_versions; - storage_->loadPrimaryInstallationLog(&installed_versions, false); - for (const auto& target : installed_versions) { - if (std::equal(hash.cbegin(), hash.cend(), target.sha256Hash().cbegin())) { - return target; - } - } - } - return Uptane::Target::Unknown(); -} - -data::InstallationResult AndroidManager::install(const Uptane::Target& target) const { - LOG_INFO << "Begin Android package installation"; - auto package_filename = (fs::path(data_ota_package_dir_) / target.filename()).string() + "." 
+ target.sha256Hash(); - std::ofstream package_file(package_filename.c_str()); - if (!package_file.good()) { - throw std::runtime_error(std::string("Error opening file ") + package_filename); - } - package_file << *storage_->openTargetFile(target); - - if (bootloader_ != nullptr) { - bootloader_->rebootFlagSet(); - } - LOG_INFO << "Performing sync()"; - sync(); - return data::InstallationResult(data::ResultCode::Numeric::kNeedCompletion, "need reboot"); -} - -data::InstallationResult AndroidManager::finalizeInstall(const Uptane::Target& target) const { - std::string ota_package_file_path = GetOTAPackageFilePath(target.sha256Hash()); - if (!ota_package_file_path.empty()) fs::remove(ota_package_file_path); - std::string errorMessage{"n/a"}; - if (installationAborted(&errorMessage)) { - return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, errorMessage); - } - return data::InstallationResult(data::ResultCode::Numeric::kOk, "package installation successfully finalized"); -} - -std::string AndroidManager::GetOTAPackageFilePath(const std::string& hash) { - fs::directory_iterator entryItEnd, entryIt{fs::path(data_ota_package_dir_)}; - for (; entryIt != entryItEnd; ++entryIt) { - auto& entry_path = entryIt->path(); - if (boost::filesystem::is_directory(*entryIt)) { - continue; - } - auto ext = entry_path.extension().string(); - ext = ext.substr(1); - if (ext == hash) { - return entry_path.string(); - } - } - return std::string{}; -} - -bool AndroidManager::installationAborted(std::string* errorMessage) const { - std::string installation_log_file{"/cache/recovery/last_install"}; - std::ifstream log(installation_log_file.c_str()); - if (!log.good()) { - throw std::runtime_error(std::string("Error opening file ") + installation_log_file); - } - for (std::string line; std::getline(log, line);) { - if (boost::algorithm::find_first(line, "error:")) { - int error_code = std::stoi(line.substr(6)); - if (error_code != 0) { - *errorMessage = std::string("Error 
code: ") + std::to_string(error_code); - return true; - } - } - } - return false; -} diff --git a/src/libaktualizr/package_manager/androidmanager.h b/src/libaktualizr/package_manager/androidmanager.h deleted file mode 100644 index b8ea8482cc..0000000000 --- a/src/libaktualizr/package_manager/androidmanager.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef ANDROIDMANAGER_H -#define ANDROIDMANAGER_H - -#include "package_manager/packagemanagerinterface.h" - -class AndroidInstallationDispatcher; - -class AndroidManager : public PackageManagerInterface { - public: - explicit AndroidManager(PackageConfig pconfig, std::shared_ptr storage, - std::shared_ptr bootloader, std::shared_ptr http) - : PackageManagerInterface(std::move(pconfig), std::move(storage), std::move(bootloader), std::move(http)) {} - ~AndroidManager() override = default; - std::string name() const override { return "android"; } - Json::Value getInstalledPackages() const override; - - Uptane::Target getCurrent() const override; - bool imageUpdated() override { return true; }; - - data::InstallationResult install(const Uptane::Target& target) const override; - data::InstallationResult finalizeInstall(const Uptane::Target& target) const override; - - static std::string GetOTAPackageFilePath(const std::string& hash); - - private: - bool installationAborted(std::string* errorMessage) const; - static const std::string data_ota_package_dir_; -}; - -#endif // ANDROIDMANAGER_H diff --git a/src/libaktualizr/package_manager/debianmanager.cc b/src/libaktualizr/package_manager/debianmanager.cc deleted file mode 100644 index c65a567446..0000000000 --- a/src/libaktualizr/package_manager/debianmanager.cc +++ /dev/null @@ -1,42 +0,0 @@ -#include "package_manager/debianmanager.h" - -#include -#include - -Json::Value DebianManager::getInstalledPackages() const { - // Currently not implemented - return Json::Value(Json::arrayValue); -} - -data::InstallationResult DebianManager::install(const Uptane::Target &target) const { - 
std::lock_guard guard(mutex_); - LOG_INFO << "Installing " << target.filename() << " as Debian package..."; - std::string cmd = "dpkg -i "; - std::string output; - TemporaryDirectory package_dir("deb_dir"); - auto target_file = storage_->openTargetFile(target); - - boost::filesystem::path deb_path = package_dir / target.filename(); - target_file->writeToFile(deb_path); - target_file->rclose(); - - int status = Utils::shell(cmd + deb_path.string(), &output, true); - if (status == 0) { - LOG_INFO << "... Installation of Debian package successful"; - storage_->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kCurrent); - return data::InstallationResult(data::ResultCode::Numeric::kOk, "Installing debian package was successful"); - } - LOG_ERROR << "... Installation of Debian package failed"; - return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, output); -} - -Uptane::Target DebianManager::getCurrent() const { - boost::optional current_version; - storage_->loadPrimaryInstalledVersions(¤t_version, nullptr); - - if (!!current_version) { - return *current_version; - } - - return Uptane::Target::Unknown(); -} diff --git a/src/libaktualizr/package_manager/debianmanager.h b/src/libaktualizr/package_manager/debianmanager.h deleted file mode 100644 index ec31a57409..0000000000 --- a/src/libaktualizr/package_manager/debianmanager.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef DEB_H_ -#define DEB_H_ - -#include -#include -#include - -#include "packagemanagerinterface.h" - -class DebianManager : public PackageManagerInterface { - public: - DebianManager(PackageConfig pconfig, std::shared_ptr storage, std::shared_ptr bootloader, - std::shared_ptr http) - : PackageManagerInterface(std::move(pconfig), std::move(storage), std::move(bootloader), std::move(http)) {} - ~DebianManager() override = default; - std::string name() const override { return "debian"; } - Json::Value getInstalledPackages() const override; - Uptane::Target getCurrent() const 
override; - data::InstallationResult install(const Uptane::Target &target) const override; - data::InstallationResult finalizeInstall(const Uptane::Target &target) const override { - (void)target; - throw std::runtime_error("Unimplemented"); - } - bool imageUpdated() override { return true; } - - private: - mutable std::mutex mutex_; -}; - -#endif // DEB_H_ diff --git a/src/libaktualizr/package_manager/debianmanager_test.cc b/src/libaktualizr/package_manager/debianmanager_test.cc deleted file mode 100644 index ebad5a53a4..0000000000 --- a/src/libaktualizr/package_manager/debianmanager_test.cc +++ /dev/null @@ -1,73 +0,0 @@ -#include - -#include -#include - -#include "config/config.h" -#include "package_manager/packagemanagerfactory.h" -#include "package_manager/packagemanagerinterface.h" -#include "storage/invstorage.h" -#include "utilities/utils.h" - -TEST(PackageManagerFactory, Debian_Install_Good) { - Config config; - config.pacman.type = PackageManager::kDebian; - TemporaryDirectory dir; - config.storage.path = dir.Path(); - - std::shared_ptr storage = INvStorage::newStorage(config.storage); - std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); - EXPECT_TRUE(pacman); - Json::Value target_json; - target_json["hashes"]["sha256"] = "hash"; - target_json["length"] = 2; - target_json["custom"]["ecuIdentifiers"]["primary_serial"]["hardwareId"] = "primary_hwid"; - Uptane::Target target("good.deb", target_json); - - storage->storeEcuSerials({{Uptane::EcuSerial("primary_serial"), Uptane::HardwareIdentifier("primary_hwid")}}); - - Json::Value target_json_test; - target_json_test["hashes"]["sha256"] = "hash_old"; - target_json_test["length"] = 2; - Uptane::Target target_test("test.deb", target_json_test); - storage->savePrimaryInstalledVersion(target_test, InstalledVersionUpdateMode::kCurrent); - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - std::stringstream("ab") >> *fhandle; - 
fhandle->wcommit(); - - EXPECT_EQ(pacman->install(target).result_code.num_code, data::ResultCode::Numeric::kOk); - EXPECT_TRUE(pacman->getCurrent().MatchTarget(target)); -} - -TEST(PackageManagerFactory, Debian_Install_Bad) { - Config config; - config.pacman.type = PackageManager::kDebian; - TemporaryDirectory dir; - config.storage.path = dir.Path(); - std::shared_ptr storage = INvStorage::newStorage(config.storage); - std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); - EXPECT_TRUE(pacman); - Json::Value target_json; - target_json["hashes"]["sha256"] = "hash"; - target_json["length"] = 2; - target_json["custom"]["ecuIdentifiers"]["primary_serial"]["hardwareId"] = "primary_hwid"; - Uptane::Target target("bad.deb", target_json); - - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - std::stringstream("ab") >> *fhandle; - fhandle->wcommit(); - - auto result = pacman->install(target); - EXPECT_EQ(result.result_code.num_code, data::ResultCode::Numeric::kInstallFailed); - EXPECT_EQ(result.description, "Error installing"); -} - -#ifndef __NO_MAIN__ -int main(int argc, char **argv) { - ::testing::InitGoogleTest(&argc, argv); - - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/libaktualizr/package_manager/docker_fake.sh b/src/libaktualizr/package_manager/docker_fake.sh deleted file mode 100755 index 290dbc166d..0000000000 --- a/src/libaktualizr/package_manager/docker_fake.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! /bin/bash -set -eEo pipefail - -if [ -n "$DOCKER_APP_FAIL" ] ; then - echo "FAILING the fake docker app command" - exit 1 -fi - -if [ "$1" = "render" ] ; then - echo "DOCKER-APP RENDER OUTPUT" - if [ ! -f app1.dockerapp ] ; then - echo "Missing docker app file!" - exit 1 - fi - cat app1.dockerapp - exit 0 -fi -if [ "$1" = "up" ] ; then - echo "DOCKER-COMPOSE UP" - if [ ! -f docker-compose.yml ] ; then - echo "Missing docker-compose file!" 
- exit 1 - fi - # the content of dockerapp includes the sha of the target, so this should - # be present in the docker-compose.yml it creates. - if ! grep primary docker-compose.yml ; then - echo "Could not find expected content in docker-compose.yml" - cat docker-compose.yml - exit 1 - fi - exit 0 -fi -echo "Unknown command: $*" -exit 1 diff --git a/src/libaktualizr/package_manager/dockerapp_test_repo.sh b/src/libaktualizr/package_manager/dockerapp_test_repo.sh deleted file mode 100755 index 19b50ba94b..0000000000 --- a/src/libaktualizr/package_manager/dockerapp_test_repo.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! /bin/bash -set -eEuo pipefail - -if [ "$#" -lt 3 ]; then - echo "Usage: $0 " - exit 1 -fi - -UPTANE_GENERATOR="$1" -DEST_DIR="$2" -PORT="$3" - -uptane_gen() { - "$UPTANE_GENERATOR" --path "$DEST_DIR" "$@" -} - -mkdir -p "$DEST_DIR" -trap 'rm -rf "$DEST_DIR"' ERR - -IMAGES=$(mktemp -d) -trap 'rm -rf "$IMAGES"' exit -DOCKER_APP="$IMAGES/foo.dockerapp" -echo "fake contents of a docker app" > "$DOCKER_APP" - -uptane_gen --command generate --expires 2021-07-04T16:33:27Z -uptane_gen --command image --filename "$DOCKER_APP" --targetname foo.dockerapp --hwid primary_hw -uptane_gen --command addtarget --hwid primary_hw --serial CA:FE:A6:D2:84:9D --targetname foo.dockerapp -uptane_gen --command signtargets - -cd $DEST_DIR -echo "Target.json is: " -cat repo/repo/targets.json -echo "Running repo server port on $PORT" -exec python3 -m http.server $PORT diff --git a/src/libaktualizr/package_manager/dockerappmanager.cc b/src/libaktualizr/package_manager/dockerappmanager.cc deleted file mode 100644 index 0fc3aefc90..0000000000 --- a/src/libaktualizr/package_manager/dockerappmanager.cc +++ /dev/null @@ -1,168 +0,0 @@ -#include "dockerappmanager.h" - -#include - -/** - * @brief This package manager compliments the OSTreePackageManager by also including optional Docker Apps. 
- * - * A full description of the Docker App project can be found here: - * https://github.com/docker/app/ - * - * Docker Apps are very analogous to docker-compose. In fact, this module - * currently "renders" the docker-app file into a docker-compose file. Each - * Docker App appears as a Target in the TUF targets list. Each OStree target - * can then reference these docker apps in its custom data section. eg: - * - * "targets": { - * httpd.dockerapp-1 : { - * "custom" : {"hardwareIds" : ["all"], "name" : "httpd.dockerapp", "version" : "1"}, - * "hashes" : {"sha256" : "f0ad4e3ce6a5e9cb70c9d747e977fddfacd08419deec0714622029b12dde8338"}, - * "length" : 889 - * }, - * "raspberrypi3-64-lmp-144" : { - * "custom" : { - * "docker_apps" : { - * "httpd" : { - * "filename" : "httpd.dockerapp-1" - * } - * }, - * "hardwareIds" : ["raspberrypi3-64"], - * "name" : "raspberrypi3-64-lmp", - * "targetFormat" : "OSTREE", - * "version" : "144" - * }, - * "hashes" : {"sha256" : "20ac4f7cd50cda6bfed0caa1f8231cc9a7e40bec60026c66df5f7e143af96942"}, - * "length" : 0 - * } - * } - */ - -struct DockerApp { - DockerApp(std::string app_name, const PackageConfig &config) - : name(std::move(app_name)), - app_root(config.docker_apps_root / name), - app_params(config.docker_app_params), - app_bin(config.docker_app_bin), - compose_bin(config.docker_compose_bin) {} - - bool render(const std::string &app_content, bool persist) { - auto bin = boost::filesystem::canonical(app_bin).string(); - Utils::writeFile(app_root / (name + ".dockerapp"), app_content); - std::string cmd("cd " + app_root.string() + " && " + bin + " render " + name); - if (!app_params.empty()) { - cmd += " -f " + app_params.string(); - } - std::string yaml; - if (Utils::shell(cmd, &yaml, true) != 0) { - LOG_ERROR << "Unable to run " << cmd << " output:\n" << yaml; - return false; - } - if (persist) { - Utils::writeFile(app_root / "docker-compose.yml", yaml); - } - return true; - } - - bool start() { - // Depending on the number and 
size of the containers in the docker-app, - // this command can take a bit of time to complete. Rather than using, - // Utils::shell which isn't interactive, we'll use std::system so that - // stdout/stderr is streamed while docker sets things up. - auto bin = boost::filesystem::canonical(compose_bin).string(); - std::string cmd("cd " + app_root.string() + " && " + bin + " up --remove-orphans -d"); - return std::system(cmd.c_str()) == 0; - } - - std::string name; - boost::filesystem::path app_root; - boost::filesystem::path app_params; - boost::filesystem::path app_bin; - boost::filesystem::path compose_bin; -}; - -bool DockerAppManager::iterate_apps(const Uptane::Target &target, const DockerAppCb &cb) const { - auto apps = target.custom_data()["docker_apps"]; - bool res = true; - Uptane::ImagesRepository repo; - // checkMetaOffline pulls in data from INvStorage to properly initialize - // the targets member of the instance so that we can use the LazyTargetList - repo.checkMetaOffline(*storage_); - - if (!apps) { - LOG_DEBUG << "Detected an update target from Director with no docker-apps data"; - for (const auto &t : Uptane::LazyTargetsList(repo, storage_, fake_fetcher_)) { - if (t.MatchTarget(target)) { - LOG_DEBUG << "Found the match " << t; - apps = t.custom_data()["docker_apps"]; - break; - } - } - } - - for (const auto &t : Uptane::LazyTargetsList(repo, storage_, fake_fetcher_)) { - for (Json::ValueIterator i = apps.begin(); i != apps.end(); ++i) { - if ((*i).isObject() && (*i).isMember("filename")) { - for (const auto &app : config.docker_apps) { - if (i.key().asString() == app && (*i)["filename"].asString() == t.filename()) { - if (!cb(app, t)) { - res = false; - } - } - } - } else { - LOG_ERROR << "Invalid custom data for docker-app: " << i.key().asString() << " -> " << *i; - } - } - } - return res; -} - -bool DockerAppManager::fetchTarget(const Uptane::Target &target, Uptane::Fetcher &fetcher, const KeyManager &keys, - FetcherProgressCb progress_cb, const 
api::FlowControlToken *token) { - if (!OstreeManager::fetchTarget(target, fetcher, keys, progress_cb, token)) { - return false; - } - - LOG_INFO << "Looking for DockerApps to fetch"; - auto cb = [this, &fetcher, &keys, progress_cb, token](const std::string &app, const Uptane::Target &app_target) { - LOG_INFO << "Fetching " << app << " -> " << app_target; - return PackageManagerInterface::fetchTarget(app_target, fetcher, keys, progress_cb, token); - }; - return iterate_apps(target, cb); -} - -data::InstallationResult DockerAppManager::install(const Uptane::Target &target) const { - auto res = OstreeManager::install(target); - auto cb = [this](const std::string &app, const Uptane::Target &app_target) { - LOG_INFO << "Installing " << app << " -> " << app_target; - std::stringstream ss; - ss << *storage_->openTargetFile(app_target); - DockerApp dapp(app, config); - return dapp.render(ss.str(), true) && dapp.start(); - }; - if (!iterate_apps(target, cb)) { - return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, "Could not render docker app"); - } - return res; -} - -TargetStatus DockerAppManager::verifyTarget(const Uptane::Target &target) const { - TargetStatus status; - if (target.IsOstree()) { - status = OstreeManager::verifyTarget(target); - if (status != TargetStatus::kGood) { - return status; - } - } - auto cb = [this](const std::string &app, const Uptane::Target &app_target) { - LOG_INFO << "Verifying " << app << " -> " << app_target; - std::stringstream ss; - ss << *storage_->openTargetFile(app_target); - DockerApp dapp(app, config); - return dapp.render(ss.str(), false); - }; - if (!iterate_apps(target, cb)) { - return TargetStatus::kInvalid; - } - return TargetStatus::kGood; -} diff --git a/src/libaktualizr/package_manager/dockerappmanager.h b/src/libaktualizr/package_manager/dockerappmanager.h deleted file mode 100644 index 3931791a61..0000000000 --- a/src/libaktualizr/package_manager/dockerappmanager.h +++ /dev/null @@ -1,29 +0,0 @@ 
-#ifndef DOCKERAPPMGR_H_ -#define DOCKERAPPMGR_H_ - -#include "ostreemanager.h" -#include "uptane/iterator.h" - -using DockerAppCb = std::function; - -class DockerAppManager : public OstreeManager { - public: - DockerAppManager(PackageConfig pconfig, std::shared_ptr storage, std::shared_ptr bootloader, - std::shared_ptr http) - : OstreeManager(std::move(pconfig), std::move(storage), std::move(bootloader), std::move(http)) { - fake_fetcher_ = std::make_shared(Config(), http_); - } - bool fetchTarget(const Uptane::Target &target, Uptane::Fetcher &fetcher, const KeyManager &keys, - FetcherProgressCb progress_cb, const api::FlowControlToken *token) override; - data::InstallationResult install(const Uptane::Target &target) const override; - TargetStatus verifyTarget(const Uptane::Target &target) const override; - std::string name() const override { return "ostree+docker-app"; } - - private: - bool iterate_apps(const Uptane::Target &target, const DockerAppCb &cb) const; - - // iterate_apps needs an Uptane::Fetcher. However, its an unused parameter - // and we just need to construct a dummy one to make the compiler happy. 
- std::shared_ptr fake_fetcher_; -}; -#endif // DOCKERAPPMGR_H_ diff --git a/src/libaktualizr/package_manager/dockerappmanager_test.cc b/src/libaktualizr/package_manager/dockerappmanager_test.cc deleted file mode 100644 index 44406217f8..0000000000 --- a/src/libaktualizr/package_manager/dockerappmanager_test.cc +++ /dev/null @@ -1,135 +0,0 @@ -#include - -#include -#include - -#include "config/config.h" -#include "http/httpclient.h" -#include "package_manager/packagemanagerfactory.h" -#include "package_manager/packagemanagerinterface.h" -#include "primary/sotauptaneclient.h" -#include "storage/invstorage.h" -#include "test_utils.h" -#include "uptane/fetcher.h" - -static std::string repo_server = "http://127.0.0.1:"; -static std::string treehub_server = "http://127.0.0.1:"; -static boost::filesystem::path test_sysroot; -static boost::filesystem::path uptane_gen; - -static std::shared_ptr newTestClient(Config& config_in, std::shared_ptr storage_in, - std::shared_ptr http_client_in, - std::shared_ptr events_channel_in = nullptr) { - std::shared_ptr bootloader_in = std::make_shared(config_in.bootloader, *storage_in); - std::shared_ptr report_queue_in = std::make_shared(config_in, http_client_in); - - return std::make_shared(config_in, storage_in, http_client_in, bootloader_in, report_queue_in, - events_channel_in); -} - -static void progress_cb(const Uptane::Target& target, const std::string& description, unsigned int progress) { - (void)description; - LOG_INFO << "progress_cb " << target << " " << progress; -} - -static std::unique_ptr create_repo(const boost::filesystem::path& repo_path) { - std::string port = TestUtils::getFreePort(); - repo_server += port; - auto p = std_::make_unique("src/libaktualizr/package_manager/dockerapp_test_repo.sh", - uptane_gen, repo_path, port); - TestUtils::waitForServer(repo_server + "/"); - return p; -} - -TEST(DockerAppManager, PackageManager_Factory_Good) { - Config config; - config.pacman.type = PackageManager::kOstreeDockerApp; - 
config.pacman.sysroot = test_sysroot; - TemporaryDirectory dir; - config.storage.path = dir.Path(); - - std::shared_ptr storage = INvStorage::newStorage(config.storage); - auto pacman = PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); - EXPECT_TRUE(pacman); -} - -TEST(DockerAppManager, DockerApp_Fetch) { - std::string sha = Utils::readFile(test_sysroot / "ostree/repo/refs/heads/ostree/1/1/0", true); - Json::Value target_json; - target_json["hashes"]["sha256"] = sha; - target_json["custom"]["targetFormat"] = "OSTREE"; - target_json["length"] = 0; - target_json["custom"]["docker_apps"]["app1"]["filename"] = "foo.dockerapp"; - Uptane::Target target("pull", target_json); - - TemporaryDirectory temp_dir; - auto repo = temp_dir.Path(); - auto repod = create_repo(repo); - - Config config; - config.pacman.type = PackageManager::kOstreeDockerApp; - config.pacman.sysroot = test_sysroot; - config.pacman.docker_apps_root = temp_dir / "docker_apps"; - config.pacman.docker_apps.push_back("app1"); - config.pacman.docker_app_bin = config.pacman.docker_compose_bin = "src/libaktualizr/package_manager/docker_fake.sh"; - config.pacman.ostree_server = treehub_server; - config.uptane.repo_server = repo_server + "/repo/repo"; - TemporaryDirectory dir; - config.storage.path = dir.Path(); - - std::shared_ptr storage = INvStorage::newStorage(config.storage); - KeyManager keys(storage, config.keymanagerConfig()); - auto http = std::make_shared(); - auto client = newTestClient(config, storage, http, nullptr); - ASSERT_TRUE(client->updateImagesMeta()); - - std::string targets = Utils::readFile(repo / "repo/repo/targets.json"); - LOG_INFO << "Repo targets " << targets; - - bool result = client->package_manager_->fetchTarget(target, *(client->uptane_fetcher), keys, progress_cb, nullptr); - ASSERT_TRUE(result); - - auto hashes = std::vector{ - Uptane::Hash(Uptane::Hash::Type::kSha256, "dfca385c923400228c8ddd3c2d572919985e48a9409a2d71dab33148017231c3"), - 
Uptane::Hash(Uptane::Hash::Type::kSha512, - "76b183d51f53613a450825afc6f984077b68ae7b321ba041a2b3871f3c25a9a20d964ad0b60352e5fdd09b78fd53879f4e3" - "fa3dcc8335b26d3bbf455803d2ecb")}; - Uptane::Target app_target("foo.dockerapp", Uptane::EcuMap{}, hashes, 8); - ASSERT_TRUE(storage->checkTargetFile(app_target)); - - client->package_manager_->install(target); - std::string content = Utils::readFile(config.pacman.docker_apps_root / "app1/docker-compose.yml"); - ASSERT_EQ("DOCKER-APP RENDER OUTPUT\nfake contents of a docker app\n", content); - - setenv("DOCKER_APP_FAIL", "1", 1); - ASSERT_EQ(TargetStatus::kInvalid, client->VerifyTarget(target)); -} - -#ifndef __NO_MAIN__ -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - - if (argc != 3) { - std::cerr << "Error: " << argv[0] - << " requires the path to an OSTree sysroot and uptane-generator as an input argument.\n"; - return EXIT_FAILURE; - } - uptane_gen = argv[2]; - - std::string port = TestUtils::getFreePort(); - treehub_server += port; - boost::process::child server_process("tests/fake_http_server/fake_test_server.py", port); - - TemporaryDirectory temp_dir; - // Utils::copyDir doesn't work here. 
Complaints about non existent symlink path - int r = system((std::string("cp -r ") + argv[1] + std::string(" ") + temp_dir.PathString()).c_str()); - if (r != 0) { - return -1; - } - test_sysroot = (temp_dir.Path() / "ostree_repo").string(); - - TestUtils::waitForServer(treehub_server + "/"); - - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/libaktualizr/package_manager/fetcher_death_test.cc b/src/libaktualizr/package_manager/fetcher_death_test.cc index 6e340d155f..3681f2254f 100644 --- a/src/libaktualizr/package_manager/fetcher_death_test.cc +++ b/src/libaktualizr/package_manager/fetcher_death_test.cc @@ -8,16 +8,19 @@ #include #include -#include "config/config.h" +#include "crypto/keymanager.h" #include "http/httpclient.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "package_manager/packagemanagerfake.h" #include "storage/sqlstorage.h" #include "test_utils.h" +#include "uptane/fetcher.h" #include "uptane/tuf.h" +#include "utilities/apiqueue.h" -static const int die_after = 50; // percent -static const int pause_duration = 20; // seconds +static const int die_after = 50; // percent +static const int pause_duration = 2; // seconds std::string server; @@ -45,7 +48,7 @@ static void progress_cb(const Uptane::Target& target, const std::string& descrip void resume(const Uptane::Target& target) { std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = std::make_shared(); - auto pacman = std::make_shared(config.pacman, storage, nullptr, http); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); api::FlowControlToken token; KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -56,11 +59,11 @@ void resume(const Uptane::Target& target) { EXPECT_TRUE(res); } -void pause_and_die(const Uptane::Target& target) { +void try_and_die(const Uptane::Target& target, bool graceful) { std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = 
std::make_shared(); api::FlowControlToken token; - auto pacman = std::make_shared(config.pacman, storage, nullptr, http); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -70,17 +73,37 @@ void pause_and_die(const Uptane::Target& target) { std::thread([&target, &fetcher, &download_promise, &token, pacman, &keys]() { bool res = pacman->fetchTarget(target, fetcher, keys, progress_cb, &token); download_promise.set_value(res); - }) - .detach(); + }).detach(); std::unique_lock lk(pause_m); cv.wait(lk, [] { return die; }); - token.setPause(true); - std::this_thread::sleep_for(std::chrono::seconds(pause_duration)); - std::raise(SIGKILL); + if (graceful) { + token.setPause(true); + std::this_thread::sleep_for(std::chrono::seconds(pause_duration)); + std::raise(SIGTERM); + } else { + std::raise(SIGKILL); + } +} + +TEST(FetcherDeathTest, TestResumeAfterPause) { + TemporaryDirectory temp_dir; + config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; + + Json::Value target_json; + target_json["hashes"]["sha256"] = "dd7bd1c37a3226e520b8d6939c30991b1c08772d5dab62b381c3a63541dc629a"; + target_json["length"] = 100 * (1 << 20); + + Uptane::Target target("large_file", target_json); + die = false; + resumed = false; + ASSERT_DEATH(try_and_die(target, true), ""); + std::cout << "Fetcher died, retrying" << std::endl; + resume(target); } -TEST(FetcherDeathTest, TestResumeBinary) { +TEST(FetcherDeathTest, TestResumeAfterSigkill) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); @@ -89,7 +112,9 @@ TEST(FetcherDeathTest, TestResumeBinary) { target_json["length"] = 100 * (1 << 20); Uptane::Target target("large_file", target_json); - ASSERT_DEATH(pause_and_die(target), ""); + die = false; + resumed = false; + ASSERT_DEATH(try_and_die(target, false), ""); std::cout << "Fetcher died, retrying" << std::endl; 
resume(target); } diff --git a/src/libaktualizr/package_manager/fetcher_test.cc b/src/libaktualizr/package_manager/fetcher_test.cc index 49a7645521..567e1cb033 100644 --- a/src/libaktualizr/package_manager/fetcher_test.cc +++ b/src/libaktualizr/package_manager/fetcher_test.cc @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -8,14 +9,16 @@ #include -#include "config/config.h" +#include "crypto/keymanager.h" #include "http/httpclient.h" #include "httpfake.h" +#include "libaktualizr/config.h" +#include "libaktualizr/packagemanagerfactory.h" #include "logging/logging.h" -#include "package_manager/packagemanagerfactory.h" #include "package_manager/packagemanagerfake.h" #include "storage/sqlstorage.h" #include "test_utils.h" +#include "uptane/fetcher.h" #include "uptane/tuf.h" #include "utilities/apiqueue.h" @@ -53,9 +56,10 @@ static void progress_cb(const Uptane::Target& target, const std::string& descrip * Resuming while not paused is ignored. * Resuming while not downloading is ignored */ -void test_pause(const Uptane::Target& target, PackageManager type = PackageManager::kNone) { +void test_pause(const Uptane::Target& target, const std::string& type = PACKAGE_MANAGER_NONE) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; config.uptane.repo_server = server; config.pacman.type = type; config.pacman.sysroot = sysroot; @@ -64,7 +68,7 @@ void test_pause(const Uptane::Target& target, PackageManager type = PackageManag std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = std::make_shared(); - auto pacman = PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, http); + auto pacman = PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, http); KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -82,8 +86,7 @@ void test_pause(const Uptane::Target& target, 
PackageManager type = PackageManag std::thread([&target, &fetcher, &download_promise, &token, pacman, &keys]() { bool res = pacman->fetchTarget(target, fetcher, keys, progress_cb, &token); download_promise.set_value(res); - }) - .detach(); + }).detach(); std::thread([&token, &pause_promise]() { std::unique_lock lk(pause_m); @@ -94,8 +97,7 @@ void test_pause(const Uptane::Target& target, PackageManager type = PackageManag EXPECT_EQ(token.setPause(false), true); EXPECT_EQ(token.setPause(false), false); pause_promise.set_value(); - }) - .detach(); + }).detach(); ASSERT_EQ(result.wait_for(std::chrono::seconds(download_timeout)), std::future_status::ready); ASSERT_EQ(pause_res.wait_for(std::chrono::seconds(0)), std::future_status::ready); @@ -117,7 +119,7 @@ TEST(Fetcher, PauseOstree) { target_json["custom"]["targetFormat"] = "OSTREE"; target_json["length"] = 0; Uptane::Target target("pause", target_json); - test_pause(target, PackageManager::kOstree); + test_pause(target, PACKAGE_MANAGER_OSTREE); } #endif // BUILD_OSTREE @@ -147,13 +149,14 @@ class HttpCustomUri : public HttpFake { /* Download from URI specified in target metadata. 
*/ TEST(Fetcher, DownloadCustomUri) { TemporaryDirectory temp_dir; + config.pacman.images_path = temp_dir.Path() / "images"; config.storage.path = temp_dir.Path(); config.uptane.repo_server = server; std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = std::make_shared(temp_dir.Path()); - auto pacman = std::make_shared(config.pacman, storage, nullptr, http); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -185,11 +188,12 @@ class HttpDefaultUri : public HttpFake { TEST(Fetcher, DownloadDefaultUri) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; config.uptane.repo_server = server; std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = std::make_shared(temp_dir.Path()); - auto pacman = std::make_shared(config.pacman, storage, nullptr, http); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -229,28 +233,30 @@ class HttpZeroLength : public HttpFake { HttpZeroLength(const boost::filesystem::path& test_dir_in) : HttpFake(test_dir_in) {} HttpResponse download(const std::string& url, curl_write_callback write_cb, curl_xferinfo_callback progress_cb, void* userp, curl_off_t from) override { - (void)write_cb; (void)progress_cb; - (void)userp; (void)from; EXPECT_EQ(url, server + "/targets/fake_file"); + const std::string content = "0"; + write_cb(const_cast(&content[0]), 1, 1, userp); counter++; - return HttpResponse("0", 200, CURLE_OK, ""); + return HttpResponse(content, 200, CURLE_OK, ""); } int counter = 0; }; -/* Don't bother downloading a target with length 0. */ +/* Don't bother downloading a target with length 0, but make sure verification + * still succeeds so that installation is possible. 
*/ TEST(Fetcher, DownloadLengthZero) { TemporaryDirectory temp_dir; config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; config.uptane.repo_server = server; std::shared_ptr storage(new SQLStorage(config.storage, false)); auto http = std::make_shared(temp_dir.Path()); - auto pacman = std::make_shared(config.pacman, storage, nullptr, http); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); KeyManager keys(storage, config.keymanagerConfig()); Uptane::Fetcher fetcher(config, http); @@ -258,17 +264,95 @@ TEST(Fetcher, DownloadLengthZero) { Json::Value empty_target_json; empty_target_json["hashes"]["sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; empty_target_json["length"] = 0; + // Make sure this isn't confused for an old-style OSTree target. + empty_target_json["custom"]["targetFormat"] = "binary"; Uptane::Target empty_target("empty_file", empty_target_json); EXPECT_TRUE(pacman->fetchTarget(empty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_EQ(pacman->verifyTarget(empty_target), TargetStatus::kGood); + EXPECT_EQ(http->counter, 0); + + // Non-empty target: download succeeds, and http module is called. This is + // done purely to make sure the test is designed correctly. + Json::Value nonempty_target_json; + nonempty_target_json["hashes"]["sha256"] = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; + nonempty_target_json["length"] = 1; + Uptane::Target nonempty_target("fake_file", nonempty_target_json); + EXPECT_TRUE(pacman->fetchTarget(nonempty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_EQ(pacman->verifyTarget(nonempty_target), TargetStatus::kGood); + EXPECT_EQ(http->counter, 1); +} + +/* Don't bother downloading a target that is larger than the available disk + * space. 
*/ +TEST(Fetcher, NotEnoughDiskSpace) { + TemporaryDirectory temp_dir; + config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; + config.uptane.repo_server = server; + + std::shared_ptr storage(new SQLStorage(config.storage, false)); + auto http = std::make_shared(temp_dir.Path()); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); + KeyManager keys(storage, config.keymanagerConfig()); + Uptane::Fetcher fetcher(config, http); + + // Find how much space is available on disk. + struct statvfs stvfsbuf {}; + EXPECT_EQ(statvfs(temp_dir.Path().c_str(), &stvfsbuf), 0); + const uint64_t available_bytes = (stvfsbuf.f_bsize * stvfsbuf.f_bavail); + + // Try to fetch a target larger than the available disk space: an exception is + // thrown and the http module is never called. Note the hash is bogus. + Json::Value empty_target_json; + empty_target_json["hashes"]["sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + empty_target_json["length"] = available_bytes * 2; + Uptane::Target empty_target("empty_file", empty_target_json); + EXPECT_FALSE(pacman->fetchTarget(empty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_NE(pacman->verifyTarget(empty_target), TargetStatus::kGood); + EXPECT_EQ(http->counter, 0); + + // Try to fetch a 1-byte target: download succeeds, and http module is called. + // This is done purely to make sure the test is designed correctly. 
+ Json::Value nonempty_target_json; + nonempty_target_json["hashes"]["sha256"] = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; + nonempty_target_json["length"] = 1; + Uptane::Target nonempty_target("fake_file", nonempty_target_json); + EXPECT_TRUE(pacman->fetchTarget(nonempty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_EQ(pacman->verifyTarget(nonempty_target), TargetStatus::kGood); + EXPECT_EQ(http->counter, 1); +} + +/* Abort downloading an OSTree target with the fake/binary package manager. */ +TEST(Fetcher, DownloadOstreeFail) { + TemporaryDirectory temp_dir; + config.storage.path = temp_dir.Path(); + config.pacman.images_path = temp_dir.Path() / "images"; + config.uptane.repo_server = server; + + std::shared_ptr storage(new SQLStorage(config.storage, false)); + auto http = std::make_shared(temp_dir.Path()); + auto pacman = std::make_shared(config.pacman, config.bootloader, storage, http); + KeyManager keys(storage, config.keymanagerConfig()); + Uptane::Fetcher fetcher(config, http); + + // Empty target: download succeeds, but http module is never called. + Json::Value empty_target_json; + empty_target_json["hashes"]["sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + empty_target_json["length"] = 0; + empty_target_json["custom"]["targetFormat"] = "OSTREE"; + Uptane::Target empty_target("empty_file", empty_target_json); + EXPECT_FALSE(pacman->fetchTarget(empty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_NE(pacman->verifyTarget(empty_target), TargetStatus::kGood); EXPECT_EQ(http->counter, 0); // Non-empty target: download succeeds, and http module is called. This is // done purely to make sure the test is designed correctly. 
Json::Value nonempty_target_json; - nonempty_target_json["hashes"]["sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + nonempty_target_json["hashes"]["sha256"] = "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9"; nonempty_target_json["length"] = 1; Uptane::Target nonempty_target("fake_file", nonempty_target_json); EXPECT_TRUE(pacman->fetchTarget(nonempty_target, fetcher, keys, progress_cb, nullptr)); + EXPECT_EQ(pacman->verifyTarget(nonempty_target), TargetStatus::kGood); EXPECT_EQ(http->counter, 1); } diff --git a/src/libaktualizr/package_manager/ostreemanager.cc b/src/libaktualizr/package_manager/ostreemanager.cc index 76e564d7bb..118bc70d4f 100644 --- a/src/libaktualizr/package_manager/ostreemanager.cc +++ b/src/libaktualizr/package_manager/ostreemanager.cc @@ -1,7 +1,7 @@ -#include "package_manager/ostreemanager.h" +#include "ostreemanager.h" -#include #include +#include #include #include @@ -13,9 +13,16 @@ #include #include +#include "libaktualizr/packagemanagerfactory.h" + +#include "bootloader/bootloader.h" #include "logging/logging.h" +#include "storage/invstorage.h" #include "utilities/utils.h" +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +AUTO_REGISTER_PACKAGE_MANAGER(PACKAGE_MANAGER_OSTREE, OstreeManager); + static void aktualizr_progress_cb(OstreeAsyncProgress *progress, gpointer data) { auto *mt = static_cast(data); if (mt->token != nullptr && !mt->token->canContinue()) { @@ -64,8 +71,14 @@ static void aktualizr_progress_cb(OstreeAsyncProgress *progress, gpointer data) data::InstallationResult OstreeManager::pull(const boost::filesystem::path &sysroot_path, const std::string &ostree_server, const KeyManager &keys, const Uptane::Target &target, const api::FlowControlToken *token, - OstreeProgressCb progress_cb) { + OstreeProgressCb progress_cb, const char *alt_remote, + boost::optional> headers) { + if (!target.IsOstree()) { + throw std::logic_error("Invalid type of Target, got " + 
target.type() + ", expected OSTREE"); + } + const std::string refhash = target.sha256Hash(); + // NOLINTNEXTLINE(modernize-avoid-c-arrays, cppcoreguidelines-avoid-c-arrays, hicpp-avoid-c-arrays) const char *const commit_ids[] = {refhash.c_str()}; GError *error = nullptr; GVariantBuilder builder; @@ -95,8 +108,20 @@ data::InstallationResult OstreeManager::pull(const boost::filesystem::path &sysr error = nullptr; } - if (!OstreeManager::addRemote(repo.get(), ostree_server, keys)) { - return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, "Error adding OSTree remote"); + if (alt_remote == nullptr) { + std::string ostree_remote_uri; + // If the Target specifies a custom fetch uri, use that. + std::string uri_override = target.uri(); + if (uri_override.empty()) { + ostree_remote_uri = ostree_server; + } else { + ostree_remote_uri = uri_override; + } + // addRemote overwrites any previous ostree remote that was set + if (!OstreeManager::addRemote(repo.get(), ostree_remote_uri, keys)) { + return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, + std::string("Error adding a default OSTree remote: ") + remote); + } } g_variant_builder_init(&builder, G_VARIANT_TYPE("a{sv}")); @@ -104,11 +129,23 @@ data::InstallationResult OstreeManager::pull(const boost::filesystem::path &sysr g_variant_builder_add(&builder, "{s@v}", "refs", g_variant_new_variant(g_variant_new_strv(commit_ids, 1))); + if (!!headers && !(*headers).empty()) { + GVariantBuilder hdr_builder; + g_variant_builder_init(&hdr_builder, G_VARIANT_TYPE("a(ss)")); + + for (const auto &kv : *headers) { + g_variant_builder_add(&hdr_builder, "(ss)", kv.first.c_str(), kv.second.c_str()); + } + g_variant_builder_add(&builder, "{s@v}", "http-headers", + g_variant_new_variant(g_variant_builder_end(&hdr_builder))); + } + options = g_variant_builder_end(&builder); PullMetaStruct mt(target, token, g_cancellable_new(), std::move(progress_cb)); 
progress.reset(ostree_async_progress_new_and_connect(aktualizr_progress_cb, &mt)); - if (ostree_repo_pull_with_options(repo.get(), remote, options, progress.get(), mt.cancellable.get(), &error) == 0) { + if (ostree_repo_pull_with_options(repo.get(), alt_remote == nullptr ? remote : alt_remote, options, progress.get(), + mt.cancellable.get(), &error) == 0) { LOG_ERROR << "Error while pulling image: " << error->code << " " << error->message; data::InstallationResult install_res(data::ResultCode::Numeric::kInstallFailed, error->message); g_error_free(error); @@ -117,7 +154,7 @@ data::InstallationResult OstreeManager::pull(const boost::filesystem::path &sysr } ostree_async_progress_finish(progress.get()); g_variant_unref(options); - return data::InstallationResult(data::ResultCode::Numeric::kOk, "Pulling ostree image was successful"); + return data::InstallationResult(data::ResultCode::Numeric::kOk, "Pulling OSTree image was successful"); } data::InstallationResult OstreeManager::install(const Uptane::Target &target) const { @@ -126,7 +163,7 @@ data::InstallationResult OstreeManager::install(const Uptane::Target &target) co GError *error = nullptr; g_autofree char *revision = nullptr; - if (config.os.size() != 0u) { + if (!config.os.empty()) { opt_osname = config.os.c_str(); } @@ -164,6 +201,7 @@ data::InstallationResult OstreeManager::install(const Uptane::Target &target) co std::string args_content = std::string(ostree_bootconfig_parser_get(ostree_deployment_get_bootconfig(merge_deployment.get()), "options")); std::vector args_vector; + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) boost::split(args_vector, args_content, boost::is_any_of(" ")); std::vector kargs_strv_vector; @@ -173,7 +211,7 @@ data::InstallationResult OstreeManager::install(const Uptane::Target &target) co kargs_strv_vector.push_back((*it).c_str()); } kargs_strv_vector[args_vector.size()] = nullptr; - auto kargs_strv = const_cast(&kargs_strv_vector[0]); + auto *kargs_strv = 
const_cast(&kargs_strv_vector[0]); OstreeDeployment *new_deployment_raw = nullptr; if (ostree_sysroot_deploy_tree(sysroot.get(), opt_osname, revision, origin.get(), merge_deployment.get(), kargs_strv, @@ -209,33 +247,61 @@ void OstreeManager::completeInstall() const { bootloader_->reboot(); } -data::InstallationResult OstreeManager::finalizeInstall(const Uptane::Target &target) const { - LOG_INFO << "Checking installation of new OSTree sysroot"; - Uptane::Target current = getCurrent(); +data::InstallationResult OstreeManager::finalizeInstall(const Uptane::Target &target) { + const std::string current_hash = getCurrentHash(); - if (current.sha256Hash() != target.sha256Hash()) { - LOG_ERROR << "Expected to boot on " << target.sha256Hash() << " but, " << current.sha256Hash() - << " found, the system might have experienced a rollback"; - return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, "Wrong version booted"); + if (!bootloader_->rebootDetected()) { + // A device can be rebooted in the middle of the "finalization" process, specifically, right after the reboot flag + // is cleared and before the pending target is marked as current. So, if a device is in such a state and gets + // rebooted, the client will run "finalization" again to apply the pending target. Since the device is already + // booted on the pending target, the "finalization" process should return "Ok." Returning "NeedCompletion" in this + // case may result in an endless loop of device reboots. + return current_hash == target.sha256Hash() + ? 
data::InstallationResult(data::ResultCode::Numeric::kOk, "Already booted on the required version") + : data::InstallationResult(data::ResultCode::Numeric::kNeedCompletion, + "Reboot is required for the pending update application"); } - return data::InstallationResult(data::ResultCode::Numeric::kOk, "Successfully booted on new version"); + data::InstallationResult install_result = + data::InstallationResult(data::ResultCode::Numeric::kOk, "Successfully booted on new version"); + LOG_INFO << "Checking installation of new OSTree sysroot"; + if (current_hash != target.sha256Hash()) { + LOG_ERROR << "Expected to boot " << target.sha256Hash() << " but found " << current_hash + << ". The system may have been rolled back."; + install_result = data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, "Wrong version booted"); + } + + bootloader_->rebootFlagClear(); + return install_result; } -OstreeManager::OstreeManager(PackageConfig pconfig, std::shared_ptr storage, - std::shared_ptr bootloader, std::shared_ptr http) - : PackageManagerInterface(std::move(pconfig), std::move(storage), std::move(bootloader), std::move(http)) { +void OstreeManager::updateNotify() { bootloader_->updateNotify(); } +void OstreeManager::installNotify(const Uptane::Target &target) { bootloader_->installNotify(target); } + +OstreeManager::OstreeManager(const PackageConfig &pconfig, const BootloaderConfig &bconfig, + const std::shared_ptr &storage, const std::shared_ptr &http, + Bootloader *bootloader) + : PackageManagerInterface(pconfig, BootloaderConfig(), storage, http), + bootloader_(bootloader == nullptr ? 
new Bootloader(bconfig, *storage) : bootloader) { GObjectUniquePtr sysroot_smart = OstreeManager::LoadSysroot(config.sysroot); if (sysroot_smart == nullptr) { throw std::runtime_error("Could not find OSTree sysroot at: " + config.sysroot.string()); } + + // consider boot successful as soon as we started, missing internet connection or connection to Secondaries are not + // proper reasons to roll back + if (imageUpdated()) { + bootloader_->setBootOK(); + } } +OstreeManager::~OstreeManager() { bootloader_.reset(nullptr); } + bool OstreeManager::fetchTarget(const Uptane::Target &target, Uptane::Fetcher &fetcher, const KeyManager &keys, - FetcherProgressCb progress_cb, const api::FlowControlToken *token) { + const FetcherProgressCb &progress_cb, const api::FlowControlToken *token) { if (!target.IsOstree()) { - // The case when the ostree package manager is set as a package manager for aktualizr - // while the target is aimed for a secondary ECU that is configured with another/non-ostree package manager + // The case when the OSTree package manager is set as a package manager for aktualizr + // while the target is aimed for a Secondary ECU that is configured with another/non-OSTree package manager return PackageManagerInterface::fetchTarget(target, fetcher, keys, progress_cb, token); } return OstreeManager::pull(config.sysroot, config.ostree_server, keys, target, token, progress_cb).success; @@ -243,8 +309,8 @@ bool OstreeManager::fetchTarget(const Uptane::Target &target, Uptane::Fetcher &f TargetStatus OstreeManager::verifyTarget(const Uptane::Target &target) const { if (!target.IsOstree()) { - // The case when the ostree package manager is set as a package manager for aktualizr - // while the target is aimed for a secondary ECU that is configured with another/non-ostree package manager + // The case when the OSTree package manager is set as a package manager for aktualizr + // while the target is aimed for a Secondary ECU that is configured with another/non-OSTree 
package manager return PackageManagerInterface::verifyTarget(target); } return verifyTargetInternal(target); @@ -283,6 +349,7 @@ TargetStatus OstreeManager::verifyTargetInternal(const Uptane::Target &target) c Json::Value OstreeManager::getInstalledPackages() const { std::string packages_str = Utils::readFile(config.packages_file); std::vector<std::string> package_lines; + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) boost::split(package_lines, packages_str, boost::is_any_of("\n")); Json::Value packages(Json::arrayValue); for (auto it = package_lines.begin(); it != package_lines.end(); ++it) { @@ -301,28 +368,45 @@ Json::Value OstreeManager::getInstalledPackages() const { return packages; } -Uptane::Target OstreeManager::getCurrent() const { +std::string OstreeManager::getCurrentHash() const { + OstreeDeployment *deployment = nullptr; GObjectUniquePtr<OstreeSysroot> sysroot_smart = OstreeManager::LoadSysroot(config.sysroot); - OstreeDeployment *booted_deployment = ostree_sysroot_get_booted_deployment(sysroot_smart.get()); - if (booted_deployment == nullptr) { - throw std::runtime_error("Could not get booted deployment in " + config.sysroot.string()); + if (config.booted == BootedType::kBooted) { + deployment = ostree_sysroot_get_booted_deployment(sysroot_smart.get()); + } else { + g_autoptr(GPtrArray) deployments = ostree_sysroot_get_deployments(sysroot_smart.get()); + if (deployments != nullptr && deployments->len > 0) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + deployment = static_cast<OstreeDeployment *>(deployments->pdata[0]); + } + } + if (deployment == nullptr) { + std::stringstream text; + text << "Could not get " << config.booted << " deployment in " << config.sysroot.string(); + throw std::runtime_error(text.str()); } - std::string current_hash = ostree_deployment_get_csum(booted_deployment); + return ostree_deployment_get_csum(deployment); +} +Uptane::Target OstreeManager::getCurrent() const { + const std::string current_hash = getCurrentHash();
boost::optional<Uptane::Target> current_version; + // This may appear Primary-specific, but since Secondaries only know about + // themselves, this actually works just fine for them, too. storage_->loadPrimaryInstalledVersions(&current_version, nullptr); if (!!current_version && current_version->sha256Hash() == current_hash) { return *current_version; } - LOG_ERROR << "Current versions in storage and reported by ostree do not match"; + LOG_ERROR << "Current versions in storage and reported by OSTree do not match"; - // Look into installation log to find a possible candidate + // Look into installation log to find a possible candidate. Again, despite the + // name, this will work for Secondaries as well. std::vector<Uptane::Target> installed_versions; storage_->loadPrimaryInstallationLog(&installed_versions, false); - // Version should be in installed versions. Its possible that multiple + // Version should be in installed versions. It's possible that multiple // targets could have the same sha256Hash. In this case the safest assumption // is that the most recent (the reverse of the vector) target is what we // should return. @@ -332,8 +416,17 @@ Uptane::Target OstreeManager::getCurrent() const { return *it; } } - - return Uptane::Target::Unknown(); + // We haven't found a matching target. This can occur when a device is + // freshly manufactured and the factory image is in a delegated target. + // Aktualizr will have had no reason to fetch the relevant delegation, and it + // doesn't know where in the delegation tree on the server it might be. + // See https://github.com/uptane/aktualizr/issues/1 for more details. In this + // case attempt to construct an approximate Uptane target. By getting the + // hash correct the server has a chance to figure out what is running on the + // device.
+ Uptane::EcuMap ecus; + std::vector hashes{Hash(Hash::Type::kSha256, current_hash)}; + return {"unknown", ecus, hashes, 0, "", "OSTREE"}; } // used for bootloader rollback @@ -390,10 +483,11 @@ GObjectUniquePtr OstreeManager::LoadSysroot(const boost::filesyst } GError *error = nullptr; if (ostree_sysroot_load(sysroot.get(), nullptr, &error) == 0) { + const std::string msg = error->message; if (error != nullptr) { g_error_free(error); } - throw std::runtime_error("could not load sysroot"); + throw std::runtime_error("could not load sysroot at " + path.string() + ": " + msg); } return sysroot; } diff --git a/src/libaktualizr/package_manager/ostreemanager.h b/src/libaktualizr/package_manager/ostreemanager.h index 6484bae715..1fa223141f 100644 --- a/src/libaktualizr/package_manager/ostreemanager.h +++ b/src/libaktualizr/package_manager/ostreemanager.h @@ -1,17 +1,20 @@ #ifndef OSTREE_H_ #define OSTREE_H_ +#include #include #include +#include #include #include +#include "libaktualizr/packagemanagerinterface.h" + #include "crypto/keymanager.h" -#include "packagemanagerinterface.h" #include "utilities/apiqueue.h" -const char remote[] = "aktualizr-remote"; +constexpr const char *remote = "aktualizr-remote"; template struct GObjectFinalizer { @@ -39,31 +42,42 @@ struct PullMetaStruct { class OstreeManager : public PackageManagerInterface { public: - OstreeManager(PackageConfig pconfig, std::shared_ptr storage, std::shared_ptr bootloader, - std::shared_ptr http); - ~OstreeManager() override = default; + OstreeManager(const PackageConfig &pconfig, const BootloaderConfig &bconfig, + const std::shared_ptr &storage, const std::shared_ptr &http, + Bootloader *bootloader = nullptr); + ~OstreeManager() override; + OstreeManager(const OstreeManager &) = delete; + OstreeManager(OstreeManager &&) = delete; + OstreeManager &operator=(const OstreeManager &) = delete; + OstreeManager &operator=(OstreeManager &&) = delete; std::string name() const override { return "ostree"; } 
Json::Value getInstalledPackages() const override; + virtual std::string getCurrentHash() const; Uptane::Target getCurrent() const override; - bool imageUpdated() override; + bool imageUpdated(); data::InstallationResult install(const Uptane::Target &target) const override; void completeInstall() const override; - data::InstallationResult finalizeInstall(const Uptane::Target &target) const override; + data::InstallationResult finalizeInstall(const Uptane::Target &target) override; + void updateNotify() override; + void installNotify(const Uptane::Target &target) override; bool fetchTarget(const Uptane::Target &target, Uptane::Fetcher &fetcher, const KeyManager &keys, - FetcherProgressCb progress_cb, const api::FlowControlToken *token) override; + const FetcherProgressCb &progress_cb, const api::FlowControlToken *token) override; TargetStatus verifyTarget(const Uptane::Target &target) const override; GObjectUniquePtr getStagedDeployment() const; static GObjectUniquePtr LoadSysroot(const boost::filesystem::path &path); static GObjectUniquePtr LoadRepo(OstreeSysroot *sysroot, GError **error); static bool addRemote(OstreeRepo *repo, const std::string &url, const KeyManager &keys); - static data::InstallationResult pull(const boost::filesystem::path &sysroot_path, const std::string &ostree_server, - const KeyManager &keys, const Uptane::Target &target, - const api::FlowControlToken *token = nullptr, - OstreeProgressCb progress_cb = nullptr); + static data::InstallationResult pull( + const boost::filesystem::path &sysroot_path, const std::string &ostree_server, const KeyManager &keys, + const Uptane::Target &target, const api::FlowControlToken *token = nullptr, + OstreeProgressCb progress_cb = nullptr, const char *alt_remote = nullptr, + boost::optional> headers = boost::none); private: TargetStatus verifyTargetInternal(const Uptane::Target &target) const; + + std::unique_ptr bootloader_; }; #endif // OSTREE_H_ diff --git 
a/src/libaktualizr/package_manager/ostreemanager_test.cc b/src/libaktualizr/package_manager/ostreemanager_test.cc index 42d57849f8..bd8c361bc6 100644 --- a/src/libaktualizr/package_manager/ostreemanager_test.cc +++ b/src/libaktualizr/package_manager/ostreemanager_test.cc @@ -1,17 +1,14 @@ #include -#include #include #include #include -#include #include -#include "config/config.h" +#include "libaktualizr/config.h" #include "package_manager/ostreemanager.h" #include "storage/invstorage.h" -#include "utilities/types.h" #include "utilities/utils.h" boost::filesystem::path test_sysroot; @@ -21,7 +18,7 @@ TEST(OstreeManager, PullBadUriNoCreds) { TemporaryDirectory temp_dir; Config config; config.pacman.ostree_server = "bad-url"; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.storage.path = temp_dir.Path(); @@ -44,7 +41,7 @@ TEST(OstreeManager, PullBadUriWithCreds) { TemporaryDirectory temp_dir; Config config; config.pacman.ostree_server = "bad-url"; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.storage.path = temp_dir.Path(); @@ -76,12 +73,12 @@ TEST(OstreeManager, InstallBadUri) { Uptane::Target target("branch-name-hash", target_json); TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - OstreeManager ostree(config.pacman, storage, nullptr, nullptr); + OstreeManager ostree(config.pacman, config.bootloader, storage, nullptr); data::InstallationResult result = ostree.install(target); EXPECT_EQ(result.result_code.num_code, data::ResultCode::Numeric::kInstallFailed); EXPECT_EQ(result.description, "Refspec 'hash' not found"); @@ -91,11 +88,11 @@ TEST(OstreeManager, 
InstallBadUri) { TEST(OstreeManager, BadSysroot) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = "sysroot-that-is-missing"; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - EXPECT_THROW(OstreeManager ostree(config.pacman, storage, nullptr, nullptr), std::runtime_error); + EXPECT_THROW(OstreeManager ostree(config.pacman, config.bootloader, storage, nullptr), std::runtime_error); } /* Parse a provided list of installed packages. */ @@ -112,13 +109,13 @@ TEST(OstreeManager, ParseInstalledPackages) { Utils::writeFile(packages_file, content); Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.pacman.packages_file = packages_file; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - OstreeManager ostree(config.pacman, storage, nullptr, nullptr); + OstreeManager ostree(config.pacman, config.bootloader, storage, nullptr); Json::Value packages = ostree.getInstalledPackages(); EXPECT_EQ(packages[0]["name"].asString(), "vim"); EXPECT_EQ(packages[0]["version"].asString(), "1.0"); @@ -128,11 +125,35 @@ TEST(OstreeManager, ParseInstalledPackages) { EXPECT_EQ(packages[2]["version"].asString(), "1.1"); } +/** + * Check that OstreeManager::getCurrent() returns a sensible result, even if it + * can't match the currently booted OSTree commit against a known target. 
+ * See: https://github.com/uptane/aktualizr/issues/1 + */ +TEST(OstreeManager, GetCurrentTarget) { + TemporaryDirectory temp_dir; + Config config; + config.pacman.type = PACKAGE_MANAGER_OSTREE; + config.pacman.sysroot = test_sysroot; + config.storage.path = temp_dir.Path(); + config.pacman.booted = BootedType::kStaged; + auto storage = INvStorage::newStorage(config.storage); + OstreeManager dut(config.pacman, config.bootloader, storage, nullptr); + auto current_target = dut.getCurrent(); + + EXPECT_TRUE(current_target.IsValid()) << "shouldn't be Uptane::Target::Unknown()"; + + EXPECT_TRUE(current_target.IsOstree()) << "should be an OSTree target"; + // This is a slightly circular test, but OstreeManager::getCurrentHash() + // fetches the hash straight from libostree. + EXPECT_EQ(dut.getCurrentHash(), current_target.sha256Hash()) << "hash should match"; +} + /* Communicate with a remote OSTree server without credentials. */ TEST(OstreeManager, AddRemoteNoCreds) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.storage.path = temp_dir.Path(); @@ -172,7 +193,7 @@ TEST(OstreeManager, AddRemoteNoCreds) { TEST(OstreeManager, AddRemoteWithCreds) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = test_sysroot; config.storage.path = temp_dir.Path(); diff --git a/src/libaktualizr/package_manager/ostreereposync.cc b/src/libaktualizr/package_manager/ostreereposync.cc deleted file mode 100644 index 41fd1adc79..0000000000 --- a/src/libaktualizr/package_manager/ostreereposync.cc +++ /dev/null @@ -1,146 +0,0 @@ -#include - -#include - -#include -#include -#include "logging/logging.h" - -namespace { -const OstreeRepoMode kOstreeRepoModeArchive = -#if !defined(OSTREE_CHECK_VERSION) - OSTREE_REPO_MODE_ARCHIVE_Z2; -#elif 
(OSTREE_CHECK_VERSION(2017, 12)) - OSTREE_REPO_MODE_ARCHIVE; -#else - OSTREE_REPO_MODE_ARCHIVE_Z2; -#endif -} // namespace - -namespace fs = boost::filesystem; - -namespace ostree_repo_sync { - -bool ArchiveModeRepo(const fs::path& repo_dir) { - GError* error = nullptr; - GFile* repo_path = nullptr; - OstreeRepo* repo = nullptr; - - BOOST_SCOPE_EXIT(&error, &repo_path, &repo) { // NOLINT - if (error != nullptr) { - g_error_free(error); - } - if (repo_path != nullptr) { - g_object_unref(repo_path); - } - if (repo != nullptr) { - g_object_unref(repo); - } - } - BOOST_SCOPE_EXIT_END - - repo_path = g_file_new_for_path(repo_dir.c_str()); - repo = ostree_repo_new(repo_path); - - gboolean open_succeed = ostree_repo_open(repo, nullptr, &error); - - return ((open_succeed != 0) && (ostree_repo_get_mode(repo) == kOstreeRepoModeArchive)); -} - -bool LocalPullRepo(const fs::path& src_repo_dir, const fs::path& dst_repo_dir, const std::string& ref_hash) { - GError* error = nullptr; - GVariant* options = nullptr; - GHashTable* refs = nullptr; - GPtrArray* refs_to_fetch = g_ptr_array_new_with_free_func(g_free); - OstreeRepo *src_repo = nullptr, *dst_repo = nullptr; - GFile *src_repo_path = nullptr, *dst_repo_path = nullptr; - - BOOST_SCOPE_EXIT(&error, &options, &refs, &refs_to_fetch, &src_repo_path, &src_repo, &dst_repo_path, // NOLINT - &dst_repo) { - if (error != nullptr) { - g_error_free(error); - } - if (options != nullptr) { - g_variant_unref(options); - } - if (src_repo_path != nullptr) { - g_object_unref(src_repo_path); - } - if (src_repo != nullptr) { - g_object_unref(src_repo); - } - if (dst_repo_path != nullptr) { - g_object_unref(dst_repo_path); - } - if (dst_repo != nullptr) { - g_object_unref(dst_repo); - } - if (refs != nullptr) { - g_hash_table_unref(refs); - } - g_ptr_array_unref(refs_to_fetch); - } - BOOST_SCOPE_EXIT_END - - // check source repo - src_repo_path = g_file_new_for_path(src_repo_dir.c_str()); - src_repo = ostree_repo_new(src_repo_path); - if 
(ostree_repo_open(src_repo, nullptr, &error) == 0) { - LOG_ERROR << "OSTree sync error: unable to open source repo, " << error->message; - return false; - } - - // check destination repo - dst_repo_path = g_file_new_for_path(dst_repo_dir.c_str()); - dst_repo = ostree_repo_new(dst_repo_path); - if (ostree_repo_create(dst_repo, kOstreeRepoModeArchive, nullptr, &error) == 0) { - LOG_ERROR << "OSTree sync error: unable to open destination repo, " << error->message; - return false; - } - - // collect refs to pull - // - // Under some circumstances the following call may not be enough, - // see the comment before the same call in ostree sources - // (src/ostree/ot-builtin-pull-local.c). - if (ostree_repo_list_refs(src_repo, nullptr, &refs, nullptr, &error) == 0) { - LOG_ERROR << "OSTree sync error: unable to get refs on source repo, " << error->message; - return false; - } - { - GHashTableIter hashiter; - gpointer hkey, hvalue; - - g_hash_table_iter_init(&hashiter, refs); - while (g_hash_table_iter_next(&hashiter, &hkey, &hvalue) != 0) { - g_ptr_array_add(refs_to_fetch, g_strdup(static_cast(hkey))); - } - } - g_ptr_array_add(refs_to_fetch, nullptr); - - // pull from source repo - const char* const refs_to_fetch_list[] = {ref_hash.c_str()}; - GVariantBuilder builder; - g_variant_builder_init(&builder, G_VARIANT_TYPE("a{sv}")); - g_variant_builder_add(&builder, "{s@v}", "flags", g_variant_new_variant(g_variant_new_int32(0))); - if (strlen(refs_to_fetch_list[0]) == 0) { - g_variant_builder_add( - &builder, "{s@v}", "refs", - g_variant_new_variant(g_variant_new_strv(reinterpret_cast(refs_to_fetch->pdata), -1))); - } else { - g_variant_builder_add(&builder, "{s@v}", "refs", g_variant_new_variant(g_variant_new_strv(refs_to_fetch_list, 1))); - } - options = g_variant_ref_sink(g_variant_builder_end(&builder)); - - std::string src_repo_url("file://"); - src_repo_url += src_repo_dir.native(); - if (ostree_repo_pull_with_options(dst_repo, src_repo_url.c_str(), options, nullptr, 
nullptr, &error) == 0) { - LOG_ERROR << "OSTree sync error: unable to pull repository, " << error->message; - return false; - } - return true; -} - -fs::path GetOstreeRepoPath(const fs::path& ostree_sysroot_path) { return (ostree_sysroot_path / "ostree" / "repo"); } - -} // namespace ostree_repo_sync diff --git a/src/libaktualizr/package_manager/ostreereposync.h b/src/libaktualizr/package_manager/ostreereposync.h deleted file mode 100644 index 9d82da940e..0000000000 --- a/src/libaktualizr/package_manager/ostreereposync.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef OSTREEREPOSYNC_H_ -#define OSTREEREPOSYNC_H_ - -#include - -namespace boost { -namespace filesystem { -class path; -} // namespace filesystem -} // namespace boost - -namespace ostree_repo_sync { - -bool ArchiveModeRepo(const boost::filesystem::path& tmp_repo_dir); -bool LocalPullRepo(const boost::filesystem::path& src_repo_dir, const boost::filesystem::path& dst_repo_dir, - const std::string& = ""); -boost::filesystem::path GetOstreeRepoPath(const boost::filesystem::path& ostree_sysroot_path); - -} // namespace ostree_repo_sync - -#endif // OSTREEREPOSYNC_H_ diff --git a/src/libaktualizr/package_manager/packagemanagerconfig.cc b/src/libaktualizr/package_manager/packagemanagerconfig.cc index 53677bd81e..32f2ca7ffd 100644 --- a/src/libaktualizr/package_manager/packagemanagerconfig.cc +++ b/src/libaktualizr/package_manager/packagemanagerconfig.cc @@ -1,50 +1,28 @@ -#include "package_manager/packagemanagerconfig.h" - -#include -#include -#include -#include - -std::ostream& operator<<(std::ostream& os, PackageManager pm) { - std::string pm_str; - switch (pm) { - case PackageManager::kOstree: - pm_str = "ostree"; - break; - case PackageManager::kDebian: - pm_str = "debian"; - break; - case PackageManager::kOstreeDockerApp: - pm_str = "ostree+docker-app"; - break; - case PackageManager::kNone: - default: - pm_str = "none"; - break; - } - os << '"' << pm_str << '"'; - return os; -} +#include "libaktualizr/config.h" 
+#include "utilities/config_utils.h" void PackageConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { - CopyFromConfig(type, "type", pt); - CopyFromConfig(os, "os", pt); - CopyFromConfig(sysroot, "sysroot", pt); - CopyFromConfig(ostree_server, "ostree_server", pt); - CopyFromConfig(packages_file, "packages_file", pt); - CopyFromConfig(fake_need_reboot, "fake_need_reboot", pt); -#ifdef BUILD_DOCKERAPP - std::string val; - CopyFromConfig(val, "docker_apps", pt); - if (val.length() > 0) { - // token_compress_on allows lists like: "foo,bar", "foo, bar", or "foo bar" - boost::split(docker_apps, val, boost::is_any_of(", "), boost::token_compress_on); - CopyFromConfig(docker_apps_root, "docker_apps_root", pt); - CopyFromConfig(docker_app_params, "docker_app_params", pt); - CopyFromConfig(docker_app_bin, "docker_app_bin", pt); - CopyFromConfig(docker_compose_bin, "docker_compose_bin", pt); + for (const auto& cp : pt) { + if (cp.first == "type") { + CopyFromConfig(type, cp.first, pt); + } else if (cp.first == "os") { + CopyFromConfig(os, cp.first, pt); + } else if (cp.first == "sysroot") { + CopyFromConfig(sysroot, cp.first, pt); + } else if (cp.first == "ostree_server") { + CopyFromConfig(ostree_server, cp.first, pt); + } else if (cp.first == "images_path") { + CopyFromConfig(images_path, cp.first, pt); + } else if (cp.first == "packages_file") { + CopyFromConfig(packages_file, cp.first, pt); + } else if (cp.first == "fake_need_reboot") { + CopyFromConfig(fake_need_reboot, cp.first, pt); + } else if (cp.first == "booted") { + CopyFromConfig(booted, cp.first, pt); + } else { + extra[cp.first] = Utils::stripQuotes(cp.second.get_value()); + } } -#endif } void PackageConfig::writeToStream(std::ostream& out_stream) const { @@ -52,13 +30,14 @@ void PackageConfig::writeToStream(std::ostream& out_stream) const { writeOption(out_stream, os, "os"); writeOption(out_stream, sysroot, "sysroot"); writeOption(out_stream, ostree_server, "ostree_server"); + 
writeOption(out_stream, images_path, "images_path"); writeOption(out_stream, packages_file, "packages_file"); writeOption(out_stream, fake_need_reboot, "fake_need_reboot"); -#ifdef BUILD_DOCKERAPP - writeOption(out_stream, boost::algorithm::join(docker_apps, ","), "docker_apps"); - writeOption(out_stream, docker_apps_root, "docker_apps_root"); - writeOption(out_stream, docker_app_params, "docker_app_params"); - writeOption(out_stream, docker_app_bin, "docker_app_bin"); - writeOption(out_stream, docker_compose_bin, "docker_compose_bin"); -#endif + writeOption(out_stream, booted, "booted"); + + // note that this is imperfect as it will not print default values deduced + // from users of `extra` + for (const auto& e : extra) { + writeOption(out_stream, e.second, e.first); + } } diff --git a/src/libaktualizr/package_manager/packagemanagerconfig.h b/src/libaktualizr/package_manager/packagemanagerconfig.h deleted file mode 100644 index d19bdd87c2..0000000000 --- a/src/libaktualizr/package_manager/packagemanagerconfig.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef PACKAGE_MANAGER_PACKAGEMANAGERCONFIG_H_ -#define PACKAGE_MANAGER_PACKAGEMANAGERCONFIG_H_ - -#include -#include -#include - -#include "utilities/config_utils.h" - -enum class PackageManager { kNone = 0, kOstree, kDebian, kAndroid, kOstreeDockerApp }; -std::ostream& operator<<(std::ostream& os, PackageManager pm); - -struct PackageConfig { - PackageManager type{PackageManager::kOstree}; - std::string os; - boost::filesystem::path sysroot; - std::string ostree_server; - boost::filesystem::path packages_file{"/usr/package.manifest"}; - -#ifdef BUILD_DOCKERAPP - std::vector docker_apps; - boost::filesystem::path docker_apps_root; - boost::filesystem::path docker_app_params; - boost::filesystem::path docker_app_bin{"/usr/bin/docker-app"}; - boost::filesystem::path docker_compose_bin{"/usr/bin/docker-compose"}; -#endif - - // Options for simulation (to be used with kNone) - bool fake_need_reboot{false}; - - void 
updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -template <> -inline void CopyFromConfig(PackageManager& dest, const std::string& option_name, - const boost::property_tree::ptree& pt) { - boost::optional value = pt.get_optional(option_name); - if (value.is_initialized()) { - std::string pm_type{StripQuotesFromStrings(value.get())}; - if (pm_type == "ostree") { - dest = PackageManager::kOstree; - } else if (pm_type == "debian") { - dest = PackageManager::kDebian; - } else if (pm_type == "android") { - dest = PackageManager::kAndroid; - } else if (pm_type == "ostree+docker-app") { - dest = PackageManager::kOstreeDockerApp; - } else { - dest = PackageManager::kNone; - } - } -} - -#endif // PACKAGE_MANAGER_PACKAGEMANAGERCONFIG_H_ diff --git a/src/libaktualizr/package_manager/packagemanagerconfig_test.cc b/src/libaktualizr/package_manager/packagemanagerconfig_test.cc new file mode 100644 index 0000000000..dab32fea9b --- /dev/null +++ b/src/libaktualizr/package_manager/packagemanagerconfig_test.cc @@ -0,0 +1,25 @@ +#include + +#include "libaktualizr/config.h" +#include "logging/logging.h" + +TEST(PackageManagerConfig, WriteToStream) { + PackageConfig config; + config.os = "amiga"; + config.fake_need_reboot = true; + config.extra["foo"] = "bar"; + std::stringstream out; + config.writeToStream(out); + std::string cfg = out.str(); + + ASSERT_NE(std::string::npos, cfg.find("os = \"amiga\"")); + ASSERT_NE(std::string::npos, cfg.find("fake_need_reboot = 1")); + ASSERT_NE(std::string::npos, cfg.find("foo = \"bar\"")); +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/libaktualizr/package_manager/packagemanagerfactory.cc b/src/libaktualizr/package_manager/packagemanagerfactory.cc index 2bf78d7245..d28e359575 100644 --- a/src/libaktualizr/package_manager/packagemanagerfactory.cc +++ 
b/src/libaktualizr/package_manager/packagemanagerfactory.cc @@ -1,56 +1,45 @@ -#include "package_manager/packagemanagerfactory.h" -#include "package_manager/packagemanagerfake.h" +#include "libaktualizr/packagemanagerfactory.h" -#ifdef BUILD_OSTREE -#include "package_manager/ostreemanager.h" -#ifdef BUILD_DOCKERAPP -#include "package_manager/dockerappmanager.h" -#endif -#endif +#include -#ifdef BUILD_DEB -#include "package_manager/debianmanager.h" -#endif +#include "logging/logging.h" -#if defined(ANDROID) -#include "package_manager/androidmanager.h" -#endif +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +static std::map *registered_pkgms_; -#include "logging/logging.h" +bool PackageManagerFactory::registerPackageManager(const char *name, PackageManagerBuilder builder) { + // this trick is needed so that this object is constructed before any element + // is added to it + static std::map rpkgms; + + if (registered_pkgms_ == nullptr) { + registered_pkgms_ = &rpkgms; + } + if (registered_pkgms_->find(name) != registered_pkgms_->end()) { + throw std::runtime_error(std::string("fatal: tried to register package manager \"") + name + "\" twice"); + } + (*registered_pkgms_)[name] = std::move(builder); + return true; +} std::shared_ptr PackageManagerFactory::makePackageManager( - const PackageConfig& pconfig, const std::shared_ptr& storage, - const std::shared_ptr& bootloader, const std::shared_ptr& http) { - (void)bootloader; - switch (pconfig.type) { - case PackageManager::kOstree: -#ifdef BUILD_OSTREE - return std::make_shared(pconfig, storage, bootloader, http); -#else - throw std::runtime_error("aktualizr was compiled without OStree support!"); -#endif - case PackageManager::kDebian: -#ifdef BUILD_DEB - return std::make_shared(pconfig, storage, bootloader, http); -#else - throw std::runtime_error("aktualizr was compiled without debian packages support!"); -#endif - case PackageManager::kAndroid: -#if defined(ANDROID) - return 
std::make_shared(pconfig, storage, bootloader, http); -#else - throw std::runtime_error("aktualizr was compiled without android support!"); -#endif - case PackageManager::kOstreeDockerApp: -#if defined(BUILD_DOCKERAPP) && defined(BUILD_OSTREE) - return std::make_shared(pconfig, storage, bootloader, http); -#else - throw std::runtime_error("aktualizr was compiled without ostree+docker-app support!"); -#endif - case PackageManager::kNone: - return std::make_shared(pconfig, storage, bootloader, http); - default: - LOG_ERROR << "Unrecognized package manager type: " << static_cast(pconfig.type); - return std::shared_ptr(); // NULL-equivalent + const PackageConfig &pconfig, const BootloaderConfig &bconfig, const std::shared_ptr &storage, + const std::shared_ptr &http) { + for (const auto &b : *registered_pkgms_) { + if (b.first == pconfig.type) { + PackageManagerInterface *pkgm = b.second(pconfig, bconfig, storage, http); + return std::shared_ptr(pkgm); + } } + + LOG_ERROR << "Package manager type \"" << pconfig.type << "\" does not exist"; + LOG_ERROR << "Available options are: " << []() { + std::stringstream ss; + for (const auto &b : *registered_pkgms_) { + ss << "\n" << b.first; + } + return ss.str(); + }(); + + throw std::runtime_error(std::string("Unsupported package manager: ") + pconfig.type); } diff --git a/src/libaktualizr/package_manager/packagemanagerfactory.h b/src/libaktualizr/package_manager/packagemanagerfactory.h deleted file mode 100644 index def447b22b..0000000000 --- a/src/libaktualizr/package_manager/packagemanagerfactory.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef PACKAGEMANAGERFACTORY_H_ -#define PACKAGEMANAGERFACTORY_H_ - -#include "config/config.h" -#include "package_manager/packagemanagerinterface.h" -#include "storage/invstorage.h" - -class PackageManagerFactory { - public: - static std::shared_ptr makePackageManager(const PackageConfig& pconfig, - const std::shared_ptr& storage, - const std::shared_ptr& bootloader, - const std::shared_ptr& 
http); -}; - -#endif // PACKAGEMANAGERFACTORY_H_ diff --git a/src/libaktualizr/package_manager/packagemanagerfactory_test.cc b/src/libaktualizr/package_manager/packagemanagerfactory_test.cc index bc47dacd41..9914284332 100644 --- a/src/libaktualizr/package_manager/packagemanagerfactory_test.cc +++ b/src/libaktualizr/package_manager/packagemanagerfactory_test.cc @@ -1,76 +1,82 @@ #include -#include #include -#include "config/config.h" -#include "package_manager/packagemanagerfactory.h" -#include "package_manager/packagemanagerinterface.h" +#include + +#include "libaktualizr/config.h" +#include "libaktualizr/packagemanagerfactory.h" +#include "libaktualizr/packagemanagerinterface.h" +#include "package_manager/packagemanagerfake.h" #include "storage/invstorage.h" #include "utilities/utils.h" boost::filesystem::path sysroot; -/* Support OSTree as a package manager. */ +#ifdef BUILD_OSTREE TEST(PackageManagerFactory, Ostree) { Config config; - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = sysroot; + config.pacman.os = "dummy-os"; TemporaryDirectory dir; config.storage.path = dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); -#ifdef BUILD_OSTREE std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); + PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr); EXPECT_TRUE(pacman); -#else - EXPECT_THROW(std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr), - std::runtime_error); -#endif } +#endif -TEST(PackageManagerFactory, Debian) { +TEST(PackageManagerFactory, None) { Config config; - config.pacman.type = PackageManager::kDebian; TemporaryDirectory dir; config.storage.path = dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); -#ifdef BUILD_DEB + config.pacman.type = PACKAGE_MANAGER_NONE; 
std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); + PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr); EXPECT_TRUE(pacman); -#else - EXPECT_THROW(std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr), - std::runtime_error); -#endif } -TEST(PackageManagerFactory, None) { +TEST(PackageManagerFactory, Bad) { Config config; TemporaryDirectory dir; config.storage.path = dir.Path(); + config.pacman.type = "bad"; std::shared_ptr storage = INvStorage::newStorage(config.storage); - config.pacman.type = PackageManager::kNone; - std::shared_ptr pacman = - PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); - EXPECT_TRUE(pacman); + EXPECT_THROW(PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr), + std::runtime_error); } -TEST(PackageManagerFactory, Bad) { +TEST(PackageManagerFactory, Register) { + // a package manager cannot be registered twice + EXPECT_THROW(PackageManagerFactory::registerPackageManager( + "none", + [](const PackageConfig&, const BootloaderConfig&, const std::shared_ptr&, + const std::shared_ptr&) -> PackageManagerInterface* { + throw std::runtime_error("unimplemented"); + }), + std::runtime_error); + + PackageManagerFactory::registerPackageManager( + "new", + [](const PackageConfig& pconfig, const BootloaderConfig& bconfig, const std::shared_ptr& storage, + const std::shared_ptr& http) -> PackageManagerInterface* { + return new PackageManagerFake(pconfig, bconfig, storage, http); + }); + Config config; - TemporaryDirectory dir; - config.storage.path = dir.Path(); - config.pacman.type = (PackageManager)-1; + TemporaryDirectory temp_dir; + config.storage.path = temp_dir.Path(); + config.pacman.type = "new"; std::shared_ptr storage = INvStorage::newStorage(config.storage); - std::shared_ptr pacman = - 
PackageManagerFactory::makePackageManager(config.pacman, storage, nullptr, nullptr); - EXPECT_FALSE(pacman); + + EXPECT_NE(PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, nullptr), nullptr); } #ifndef __NO_MAIN__ -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); if (argc != 2) { diff --git a/src/libaktualizr/package_manager/packagemanagerfake.cc b/src/libaktualizr/package_manager/packagemanagerfake.cc index b0c108703e..66036d2587 100644 --- a/src/libaktualizr/package_manager/packagemanagerfake.cc +++ b/src/libaktualizr/package_manager/packagemanagerfake.cc @@ -1,7 +1,13 @@ -#include "packagemanagerfake.h" +#include "libaktualizr/packagemanagerfactory.h" +#include "logging/logging.h" +#include "packagemanagerfake.h" +#include "storage/invstorage.h" #include "utilities/fault_injection.h" +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +AUTO_REGISTER_PACKAGE_MANAGER(PACKAGE_MANAGER_NONE, PackageManagerFake); + Json::Value PackageManagerFake::getInstalledPackages() const { Json::Value packages(Json::arrayValue); Json::Value package; @@ -51,7 +57,12 @@ void PackageManagerFake::completeInstall() const { bootloader_->reboot(true); } -data::InstallationResult PackageManagerFake::finalizeInstall(const Uptane::Target& target) const { +data::InstallationResult PackageManagerFake::finalizeInstall(const Uptane::Target& target) { + if (config.fake_need_reboot && !bootloader_->rebootDetected()) { + return data::InstallationResult(data::ResultCode::Numeric::kNeedCompletion, + "Reboot is required for the pending update application"); + } + boost::optional pending_version; storage_->loadPrimaryInstalledVersions(nullptr, &pending_version); @@ -80,11 +91,14 @@ data::InstallationResult PackageManagerFake::finalizeInstall(const Uptane::Targe data::InstallationResult(data::ResultCode::Numeric::kInternalError, "Pending and new target do not match"); } + if 
(config.fake_need_reboot) { + bootloader_->rebootFlagClear(); + } return install_res; } bool PackageManagerFake::fetchTarget(const Uptane::Target& target, Uptane::Fetcher& fetcher, const KeyManager& keys, - FetcherProgressCb progress_cb, const api::FlowControlToken* token) { + const FetcherProgressCb& progress_cb, const api::FlowControlToken* token) { // fault injection: only enabled with FIU_ENABLE defined. Note that all // exceptions thrown in PackageManagerInterface::fetchTarget are caught by a // try in the same function, so we can only emulate the warning and return @@ -99,5 +113,12 @@ bool PackageManagerFake::fetchTarget(const Uptane::Target& target, Uptane::Fetch return false; } + // TODO(OTA-4939): Unify this with the check in + // SotaUptaneClient::getNewTargets() and make it more generic. + if (target.IsOstree()) { + LOG_ERROR << "Cannot download OSTree target " << target.filename() << " with the fake package manager!"; + return false; + } + return PackageManagerInterface::fetchTarget(target, fetcher, keys, progress_cb, token); } diff --git a/src/libaktualizr/package_manager/packagemanagerfake.h b/src/libaktualizr/package_manager/packagemanagerfake.h index d5eae5eec6..4de33e346e 100644 --- a/src/libaktualizr/package_manager/packagemanagerfake.h +++ b/src/libaktualizr/package_manager/packagemanagerfake.h @@ -4,25 +4,34 @@ #include #include -#include "package_manager/packagemanagerinterface.h" +#include "libaktualizr/packagemanagerinterface.h" + +#include "bootloader/bootloader.h" class PackageManagerFake : public PackageManagerInterface { public: - PackageManagerFake(PackageConfig pconfig, std::shared_ptr storage, std::shared_ptr bootloader, - std::shared_ptr http) - : PackageManagerInterface(std::move(pconfig), std::move(storage), std::move(bootloader), std::move(http)) {} + PackageManagerFake(const PackageConfig &pconfig, const BootloaderConfig &bconfig, + const std::shared_ptr &storage, const std::shared_ptr &http) + : PackageManagerInterface(pconfig, 
bconfig, storage, http), bootloader_{new Bootloader(bconfig, *storage_)} {} ~PackageManagerFake() override = default; + PackageManagerFake(const PackageManagerFake &) = delete; + PackageManagerFake(PackageManagerFake &&) = delete; + PackageManagerFake &operator=(const PackageManagerFake &) = delete; + PackageManagerFake &operator=(PackageManagerFake &&) = delete; std::string name() const override { return "fake"; } Json::Value getInstalledPackages() const override; Uptane::Target getCurrent() const override; - bool imageUpdated() override { return true; }; data::InstallationResult install(const Uptane::Target &target) const override; void completeInstall() const override; - data::InstallationResult finalizeInstall(const Uptane::Target &target) const override; + data::InstallationResult finalizeInstall(const Uptane::Target &target) override; + void updateNotify() override { bootloader_->updateNotify(); }; bool fetchTarget(const Uptane::Target &target, Uptane::Fetcher &fetcher, const KeyManager &keys, - FetcherProgressCb progress_cb, const api::FlowControlToken *token) override; + const FetcherProgressCb &progress_cb, const api::FlowControlToken *token) override; + + private: + std::unique_ptr bootloader_; }; #endif // PACKAGEMANAGERFAKE_H_ diff --git a/src/libaktualizr/package_manager/packagemanagerfake_test.cc b/src/libaktualizr/package_manager/packagemanagerfake_test.cc index 0df35d557e..6604a634ec 100644 --- a/src/libaktualizr/package_manager/packagemanagerfake_test.cc +++ b/src/libaktualizr/package_manager/packagemanagerfake_test.cc @@ -7,14 +7,103 @@ #include -#include "config/config.h" +#include "crypto/keymanager.h" #include "httpfake.h" +#include "libaktualizr/config.h" +#include "libaktualizr/types.h" #include "package_manager/packagemanagerfake.h" #include "storage/invstorage.h" +#include "uptane/fetcher.h" #include "uptane/tuf.h" -#include "utilities/types.h" #include "utilities/utils.h" +// Test creating, appending and reading binary targets. 
+TEST(PackageManagerFake, Binary) { + TemporaryDirectory temp_dir; + Config config; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; + config.storage.path = temp_dir.Path(); + + auto storage = INvStorage::newStorage(config.storage); + PackageManagerFake pacman(config.pacman, config.bootloader, storage, nullptr); + + Json::Value target_json; + target_json["hashes"]["sha256"] = "D9CD8155764C3543F10FAD8A480D743137466F8D55213C8EAEFCD12F06D43A80"; + Uptane::Target target("aa.bin", target_json); + + { + auto out = pacman.createTargetFile(target); + out << "a"; + } + { + auto in = pacman.openTargetFile(target); + std::stringstream ss; + ss << in.rdbuf(); + ASSERT_EQ(ss.str(), "a"); + } + { + auto out = pacman.appendTargetFile(target); + out << "a"; + } + { + auto in = pacman.openTargetFile(target); + std::stringstream ss; + ss << in.rdbuf(); + ASSERT_EQ(ss.str(), "aa"); + } + // Test overwriting + { + auto out = pacman.createTargetFile(target); + out << "a"; + } + { + auto in = pacman.openTargetFile(target); + std::stringstream ss; + ss << in.rdbuf(); + ASSERT_EQ(ss.str(), "a"); + } + + pacman.removeTargetFile(target); + EXPECT_THROW(pacman.appendTargetFile(target), std::runtime_error); + EXPECT_THROW(pacman.openTargetFile(target), std::runtime_error); +} + +// Test listing and removing binary targets +TEST(PackageManagerFake, ListRemove) { + TemporaryDirectory temp_dir; + Config config; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; + config.storage.path = temp_dir.Path(); + + auto storage = INvStorage::newStorage(config.storage); + PackageManagerFake pacman(config.pacman, config.bootloader, storage, nullptr); + + Json::Value target_json; + target_json["hashes"]["sha256"] = "D9CD8155764C3543F10FAD8A480D743137466F8D55213C8EAEFCD12F06D43A80"; + Uptane::Target t1("aa.bin", target_json); + target_json["hashes"]["sha256"] = 
"A81C31AC62620B9215A14FF00544CB07A55B765594F3AB3BE77E70923AE27CF1"; + Uptane::Target t2("bb.bin", target_json); + + pacman.createTargetFile(t1); + pacman.createTargetFile(t2); + + auto targets = pacman.getTargetFiles(); + ASSERT_EQ(targets.size(), 2); + ASSERT_EQ(targets.at(0).filename(), "aa.bin"); + ASSERT_EQ(targets.at(1).filename(), "bb.bin"); + + pacman.removeTargetFile(t1); + targets = pacman.getTargetFiles(); + ASSERT_EQ(targets.size(), 1); + ASSERT_EQ(targets.at(0).filename(), "bb.bin"); + EXPECT_FALSE(boost::filesystem::exists(temp_dir.Path() / "images" / + "D9CD8155764C3543F10FAD8A480D743137466F8D55213C8EAEFCD12F06D43A80")); + EXPECT_TRUE(boost::filesystem::exists(temp_dir.Path() / "images" / + "A81C31AC62620B9215A14FF00544CB07A55B765594F3AB3BE77E70923AE27CF1")); +} + /* * Verify a stored target. * Verify that a target is unavailable. @@ -25,68 +114,72 @@ TEST(PackageManagerFake, Verify) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); Uptane::EcuMap primary_ecu{{Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}}; const int length = 4; - uint8_t content[length]; + char content[length]; memcpy(content, "good", length); MultiPartSHA256Hasher hasher; - hasher.update(content, length); + hasher.update(reinterpret_cast(content), length); const std::string hash = hasher.getHexDigest(); - Uptane::Target target("some-pkg", primary_ecu, {Uptane::Hash(Uptane::Hash::Type::kSha256, hash)}, length, ""); + Uptane::Target target("some-pkg", primary_ecu, {Hash(Hash::Type::kSha256, hash)}, length, ""); - PackageManagerFake fakepm(config.pacman, storage, nullptr, nullptr); + PackageManagerFake fakepm(config.pacman, config.bootloader, storage, nullptr); // Target is not yet available. 
EXPECT_EQ(fakepm.verifyTarget(target), TargetStatus::kNotFound); // Target has a bad hash. - auto whandle = storage->allocateTargetFile(false, target); - uint8_t content_bad[length + 1]; + auto whandle = fakepm.createTargetFile(target); + char content_bad[length + 1]; memset(content_bad, 0, length + 1); - EXPECT_EQ(whandle->wfeed(content_bad, length), length); - whandle->wcommit(); + whandle.write(content_bad, length); + whandle.close(); EXPECT_EQ(fakepm.verifyTarget(target), TargetStatus::kHashMismatch); // Target is oversized. - whandle = storage->allocateTargetFile(false, target); - EXPECT_EQ(whandle->wfeed(content_bad, length + 1), length + 1); - whandle->wcommit(); + whandle = fakepm.createTargetFile(target); + whandle.write(content_bad, length + 1); + whandle.close(); EXPECT_EQ(fakepm.verifyTarget(target), TargetStatus::kOversized); // Target is incomplete. - whandle = storage->allocateTargetFile(false, target); - EXPECT_EQ(whandle->wfeed(content, length - 1), length - 1); - whandle->wcommit(); + whandle = fakepm.createTargetFile(target); + whandle.write(content, length - 1); + whandle.close(); EXPECT_EQ(fakepm.verifyTarget(target), TargetStatus::kIncomplete); // Target is good. 
- whandle = storage->allocateTargetFile(false, target); - EXPECT_EQ(whandle->wfeed(content, length), length); - whandle->wcommit(); + whandle = fakepm.createTargetFile(target); + whandle.write(content, length); + whandle.close(); EXPECT_EQ(fakepm.verifyTarget(target), TargetStatus::kGood); } TEST(PackageManagerFake, FinalizeAfterReboot) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; config.pacman.fake_need_reboot = true; config.bootloader.reboot_sentinel_dir = temp_dir.Path(); config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); std::shared_ptr bootloader = std::make_shared(config.bootloader, *storage); - PackageManagerFake fakepm(config.pacman, storage, bootloader, nullptr); + PackageManagerFake fakepm(config.pacman, config.bootloader, storage, nullptr); Uptane::EcuMap primary_ecu; - Uptane::Target target("pkg", primary_ecu, {Uptane::Hash(Uptane::Hash::Type::kSha256, "hash")}, 1, ""); + Uptane::Target target("pkg", primary_ecu, {Hash(Hash::Type::kSha256, "hash")}, 1, ""); auto result = fakepm.install(target); EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kNeedCompletion); storage->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kPending); + fakepm.completeInstall(); + result = fakepm.finalizeInstall(target); EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kOk); } @@ -98,20 +191,21 @@ TEST(PackageManagerFake, FinalizeAfterReboot) { TEST(PackageManagerFake, DownloadFailureInjection) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); auto http = std::make_shared(temp_dir.Path()); 
Uptane::Fetcher uptane_fetcher(config, http); KeyManager keys(storage, config.keymanagerConfig()); - PackageManagerFake fakepm(config.pacman, storage, nullptr, http); + PackageManagerFake fakepm(config.pacman, config.bootloader, storage, http); fault_injection_init(); // no fault Uptane::EcuMap primary_ecu{{Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}}; - Uptane::Target target("pkg", primary_ecu, {Uptane::Hash(Uptane::Hash::Type::kSha256, "hash")}, 0, ""); + Uptane::Target target("pkg", primary_ecu, {Hash(Hash::Type::kSha256, "hash")}, 0, ""); EXPECT_TRUE(fakepm.fetchTarget(target, uptane_fetcher, keys, nullptr, nullptr)); // fault @@ -129,17 +223,18 @@ TEST(PackageManagerFake, DownloadFailureInjection) { TEST(PackageManagerFake, InstallFailureInjection) { TemporaryDirectory temp_dir; Config config; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.pacman.images_path = temp_dir.Path() / "images"; config.storage.path = temp_dir.Path(); std::shared_ptr storage = INvStorage::newStorage(config.storage); - PackageManagerFake fakepm(config.pacman, storage, nullptr, nullptr); + PackageManagerFake fakepm(config.pacman, config.bootloader, storage, nullptr); fault_injection_init(); // no fault Uptane::EcuMap primary_ecu{{Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}}; - Uptane::Target target("pkg", primary_ecu, {Uptane::Hash(Uptane::Hash::Type::kSha256, "hash")}, 1, ""); + Uptane::Target target("pkg", primary_ecu, {Hash(Hash::Type::kSha256, "hash")}, 1, ""); auto result = fakepm.install(target); EXPECT_EQ(result.result_code, data::ResultCode::Numeric::kOk); diff --git a/src/libaktualizr/package_manager/packagemanagerinterface.cc b/src/libaktualizr/package_manager/packagemanagerinterface.cc index 94b5d8be85..a17995979a 100644 --- a/src/libaktualizr/package_manager/packagemanagerinterface.cc +++ b/src/libaktualizr/package_manager/packagemanagerinterface.cc @@ -1,23 +1,35 @@ 
-#include "packagemanagerinterface.h" +#include "libaktualizr/packagemanagerinterface.h" +#include +#include +#include + +#include "crypto/crypto.h" +#include "crypto/keymanager.h" #include "http/httpclient.h" #include "logging/logging.h" +#include "storage/invstorage.h" +#include "uptane/exceptions.h" +#include "uptane/fetcher.h" +#include "utilities/apiqueue.h" struct DownloadMetaStruct { + public: DownloadMetaStruct(Uptane::Target target_in, FetcherProgressCb progress_cb_in, const api::FlowControlToken* token_in) : hash_type{target_in.hashes()[0].type()}, target{std::move(target_in)}, token{token_in}, - progress_cb{std::move(progress_cb_in)} {} - uint64_t downloaded_length{0}; + progress_cb{std::move(progress_cb_in)}, + time_lastreport{std::chrono::steady_clock::now()} {} + uintmax_t downloaded_length{0}; unsigned int last_progress{0}; - std::unique_ptr fhandle; - const Uptane::Hash::Type hash_type; + std::ofstream fhandle; + const Hash::Type hash_type; MultiPartHasher& hasher() { switch (hash_type) { - case Uptane::Hash::Type::kSha256: + case Hash::Type::kSha256: return sha256_hasher; - case Uptane::Hash::Type::kSha512: + case Hash::Type::kSha512: return sha512_hasher; default: throw std::runtime_error("Unknown hash algorithm"); @@ -26,6 +38,8 @@ struct DownloadMetaStruct { Uptane::Target target; const api::FlowControlToken* token; FetcherProgressCb progress_cb; + // each LogProgressInterval msec log dowload progress for big files + std::chrono::time_point time_lastreport; private: MultiPartSHA256Hasher sha256_hasher; @@ -34,33 +48,40 @@ struct DownloadMetaStruct { static size_t DownloadHandler(char* contents, size_t size, size_t nmemb, void* userp) { assert(userp); - auto ds = static_cast(userp); - uint64_t downloaded = size * nmemb; + auto* ds = static_cast(userp); + size_t downloaded = size * nmemb; uint64_t expected = ds->target.length(); if ((ds->downloaded_length + downloaded) > expected) { return downloaded + 1; // curl will abort if return unexpected 
size; } - // incomplete writes will stop the download (written_size != nmemb*size) - size_t written_size = ds->fhandle->wfeed(reinterpret_cast(contents), downloaded); - ds->hasher().update(reinterpret_cast(contents), written_size); - + ds->fhandle.write(contents, static_cast(downloaded)); + ds->hasher().update(reinterpret_cast(contents), downloaded); ds->downloaded_length += downloaded; - return written_size; + return downloaded; } +static constexpr int64_t LogProgressInterval = 15000; + static int ProgressHandler(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) { (void)dltotal; (void)dlnow; (void)ultotal; (void)ulnow; - auto ds = static_cast(clientp); + auto* ds = static_cast(clientp); uint64_t expected = ds->target.length(); auto progress = static_cast((ds->downloaded_length * 100) / expected); if (ds->progress_cb && progress > ds->last_progress) { ds->last_progress = progress; ds->progress_cb(ds->target, "Downloading", progress); + // OTA-4864:Improve binary file download progress logging. 
Report each XX sec report event that notify user + auto now = std::chrono::steady_clock::now(); + auto milliseconds = std::chrono::duration_cast(now - ds->time_lastreport); + if (milliseconds.count() > LogProgressInterval) { + LOG_INFO << "Download progress for file " << ds->target.filename() << ": " << progress << "%"; + ds->time_lastreport = now; + } } if (ds->token != nullptr && !ds->token->canContinue(false)) { return 1; @@ -68,18 +89,17 @@ static int ProgressHandler(void* clientp, curl_off_t dltotal, curl_off_t dlnow, return 0; } -static void restoreHasherState(MultiPartHasher& hasher, StorageTargetRHandle* data) { - size_t data_len; - size_t buf_len = 1024; - uint8_t buf[buf_len]; +static void restoreHasherState(MultiPartHasher& hasher, std::ifstream data) { + static constexpr size_t buf_len = 1024; + std::array buf{}; do { - data_len = data->rread(buf, buf_len); - hasher.update(buf, data_len); - } while (data_len != 0); + data.read(reinterpret_cast(buf.data()), buf.size()); + hasher.update(buf.data(), static_cast(data.gcount())); + } while (data.gcount() != 0); } bool PackageManagerInterface::fetchTarget(const Uptane::Target& target, Uptane::Fetcher& fetcher, - const KeyManager& keys, FetcherProgressCb progress_cb, + const KeyManager& keys, const FetcherProgressCb& progress_cb, const api::FlowControlToken* token) { (void)keys; bool result = false; @@ -92,22 +112,28 @@ bool PackageManagerInterface::fetchTarget(const Uptane::Target& target, Uptane:: LOG_INFO << "Image already downloaded; skipping download"; return true; } + std::unique_ptr ds = std_::make_unique(target, progress_cb, token); if (target.length() == 0) { - LOG_WARNING << "Skipping download of target with length 0"; + LOG_INFO << "Skipping download of target with length 0"; + ds->fhandle = createTargetFile(target); return true; } - DownloadMetaStruct ds(target, std::move(progress_cb), token); if (exists == TargetStatus::kIncomplete) { - auto target_check = storage_->checkTargetFile(target); - 
ds.downloaded_length = target_check->first; - auto target_handle = storage_->openTargetFile(target); - ::restoreHasherState(ds.hasher(), target_handle.get()); - target_handle->rclose(); - ds.fhandle = target_handle->toWriteHandle(); + LOG_INFO << "Continuing incomplete download of file " << target.filename(); + auto target_check = checkTargetFile(target); + ds->downloaded_length = target_check->first; + ::restoreHasherState(ds->hasher(), openTargetFile(target)); + ds->fhandle = appendTargetFile(target); } else { // If the target was found, but is oversized or the hash doesn't match, // just start over. - ds.fhandle = storage_->allocateTargetFile(false, target); + LOG_DEBUG << "Initiating download of file " << target.filename(); + ds->fhandle = createTargetFile(target); + } + + const uint64_t required_bytes = target.length() - ds->downloaded_length; + if (!checkAvailableDiskSpace(required_bytes)) { + throw std::runtime_error("Insufficient disk space available to download target"); } std::string target_url = target.uri(); @@ -117,17 +143,27 @@ bool PackageManagerInterface::fetchTarget(const Uptane::Target& target, Uptane:: HttpResponse response; for (;;) { - response = http_->download(target_url, DownloadHandler, ProgressHandler, &ds, - static_cast(ds.downloaded_length)); + response = http_->download(target_url, DownloadHandler, ProgressHandler, ds.get(), + static_cast(ds->downloaded_length)); + + if (response.curl_code == CURLE_RANGE_ERROR) { + LOG_WARNING << "The image server doesn't support byte range requests," + " try to download the image from the beginning: " + << target_url; + ds = std_::make_unique(target, progress_cb, token); + ds->fhandle = createTargetFile(target); + continue; + } + if (!response.wasInterrupted()) { break; } - ds.fhandle.reset(); + ds->fhandle.close(); // sleep if paused or abort the download if (!token->canContinue()) { throw Uptane::Exception("image", "Download of a target was aborted"); } - ds.fhandle = 
storage_->openTargetFile(target)->toWriteHandle(); + ds->fhandle = appendTargetFile(target); } LOG_TRACE << "Download status: " << response.getStatusStr() << std::endl; if (!response.isOk()) { @@ -136,38 +172,126 @@ bool PackageManagerInterface::fetchTarget(const Uptane::Target& target, Uptane:: } throw Uptane::Exception("image", "Could not download file, error: " + response.error_message); } - if (!target.MatchHash(Uptane::Hash(ds.hash_type, ds.hasher().getHexDigest()))) { - ds.fhandle->wabort(); + if (!target.MatchHash(Hash(ds->hash_type, ds->hasher().getHexDigest()))) { + ds->fhandle.close(); + removeTargetFile(target); throw Uptane::TargetHashMismatch(target.filename()); } - ds.fhandle->wcommit(); + ds->fhandle.close(); result = true; - } catch (const Uptane::Exception& e) { + } catch (const std::exception& e) { LOG_WARNING << "Error while downloading a target: " << e.what(); } return result; } TargetStatus PackageManagerInterface::verifyTarget(const Uptane::Target& target) const { - auto target_exists = storage_->checkTargetFile(target); + auto target_exists = checkTargetFile(target); if (!target_exists) { + LOG_DEBUG << "File " << target.filename() << " with expected hash not found in the database."; return TargetStatus::kNotFound; } else if (target_exists->first < target.length()) { + LOG_DEBUG << "File " << target.filename() << " was found in the database, but is incomplete."; return TargetStatus::kIncomplete; } else if (target_exists->first > target.length()) { + LOG_DEBUG << "File " << target.filename() << " was found in the database, but is oversized."; return TargetStatus::kOversized; } // Even if the file exists and the length matches, recheck the hash. 
DownloadMetaStruct ds(target, nullptr, nullptr); ds.downloaded_length = target_exists->first; - auto target_handle = storage_->openTargetFile(target); - ::restoreHasherState(ds.hasher(), target_handle.get()); - target_handle->rclose(); - if (!target.MatchHash(Uptane::Hash(ds.hash_type, ds.hasher().getHexDigest()))) { + ::restoreHasherState(ds.hasher(), openTargetFile(target)); + if (!target.MatchHash(Hash(ds.hash_type, ds.hasher().getHexDigest()))) { LOG_ERROR << "Target exists with expected length, but hash does not match metadata! " << target; return TargetStatus::kHashMismatch; } return TargetStatus::kGood; } + +bool PackageManagerInterface::checkAvailableDiskSpace(const uint64_t required_bytes) const { + struct statvfs stvfsbuf {}; + const int stat_res = statvfs(config.images_path.c_str(), &stvfsbuf); + if (stat_res < 0) { + LOG_WARNING << "Unable to read filesystem statistics: error code " << stat_res; + return true; + } + const uint64_t available_bytes = (static_cast(stvfsbuf.f_bsize) * stvfsbuf.f_bavail); + const uint64_t reserved_bytes = 1 << 20; + + if (required_bytes + reserved_bytes < available_bytes) { + return true; + } else { + LOG_ERROR << "Insufficient disk space available to download target! 
Required: " << required_bytes + << ", available: " << available_bytes << ", reserved: " << reserved_bytes; + return false; + } +} + +boost::optional> PackageManagerInterface::checkTargetFile( + const Uptane::Target& target) const { + std::string filename = storage_->getTargetFilename(target.filename()); + if (!filename.empty()) { + auto path = config.images_path / filename; + if (boost::filesystem::exists(path)) { + return {{boost::filesystem::file_size(path), path.string()}}; + } + } + return boost::none; +} + +std::ifstream PackageManagerInterface::openTargetFile(const Uptane::Target& target) const { + auto file = checkTargetFile(target); + if (!file) { + throw std::runtime_error("File doesn't exist for target " + target.filename()); + } + std::ifstream stream(file->second, std::ios::binary); + if (!stream.good()) { + throw std::runtime_error("Can't open file " + file->second); + } + return stream; +} + +std::ofstream PackageManagerInterface::createTargetFile(const Uptane::Target& target) { + std::string filename = target.hashes()[0].HashString(); + std::string filepath = (config.images_path / filename).string(); + boost::filesystem::create_directories(config.images_path); + std::ofstream stream(filepath, std::ios::binary | std::ios::ate); + if (!stream.good()) { + throw std::runtime_error("Can't write to file " + filepath); + } + storage_->storeTargetFilename(target.filename(), filename); + return stream; +} + +std::ofstream PackageManagerInterface::appendTargetFile(const Uptane::Target& target) { + auto file = checkTargetFile(target); + if (!file) { + throw std::runtime_error("File doesn't exist for target " + target.filename()); + } + std::ofstream stream(file->second, std::ios::binary | std::ios::app); + if (!stream.good()) { + throw std::runtime_error("Can't open file " + file->second); + } + return stream; +} + +void PackageManagerInterface::removeTargetFile(const Uptane::Target& target) { + auto file = checkTargetFile(target); + if (!file) { + throw 
std::runtime_error("File doesn't exist for target " + target.filename()); + } + boost::filesystem::remove(file->second); + storage_->deleteTargetInfo(target.filename()); +} + +std::vector PackageManagerInterface::getTargetFiles() { + std::vector v; + auto names = storage_->getAllTargetNames(); + v.reserve(names.size()); + for (const auto& name : names) { + v.emplace_back(name, Uptane::EcuMap{}, std::vector{}, 0); + } + return v; +} diff --git a/src/libaktualizr/package_manager/packagemanagerinterface.h b/src/libaktualizr/package_manager/packagemanagerinterface.h deleted file mode 100644 index 4587fb5ba3..0000000000 --- a/src/libaktualizr/package_manager/packagemanagerinterface.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef PACKAGEMANAGERINTERFACE_H_ -#define PACKAGEMANAGERINTERFACE_H_ - -#include -#include - -#include "bootloader/bootloader.h" -#include "crypto/keymanager.h" -#include "http/httpinterface.h" -#include "packagemanagerconfig.h" -#include "storage/invstorage.h" -#include "uptane/fetcher.h" -#include "utilities/apiqueue.h" -#include "utilities/types.h" - -using FetcherProgressCb = std::function; - -/** - * Status of downloaded target. - */ -enum class TargetStatus { - /* Target has been downloaded and verified. */ - kGood = 0, - /* Target was not found. */ - kNotFound, - /* Target was found, but is incomplete. */ - kIncomplete, - /* Target was found, but is larger than expected. */ - kOversized, - /* Target was found, but hash did not match the metadata. 
*/ - kHashMismatch, - /* Target was found and has valid metadata but the content is not suitable for the packagemanager */ - kInvalid, -}; - -class PackageManagerInterface { - public: - PackageManagerInterface(PackageConfig pconfig, std::shared_ptr storage, - std::shared_ptr bootloader, std::shared_ptr http) - : config(std::move(pconfig)), - storage_(std::move(storage)), - bootloader_(std::move(bootloader)), - http_(std::move(http)) {} - virtual ~PackageManagerInterface() = default; - virtual std::string name() const = 0; - virtual Json::Value getInstalledPackages() const = 0; - virtual Uptane::Target getCurrent() const = 0; - virtual data::InstallationResult install(const Uptane::Target& target) const = 0; - virtual void completeInstall() const { throw std::runtime_error("Unimplemented"); }; - virtual data::InstallationResult finalizeInstall(const Uptane::Target& target) const = 0; - virtual bool imageUpdated() = 0; - virtual bool fetchTarget(const Uptane::Target& target, Uptane::Fetcher& fetcher, const KeyManager& keys, - FetcherProgressCb progress_cb, const api::FlowControlToken* token); - virtual TargetStatus verifyTarget(const Uptane::Target& target) const; - - // only returns the version - Json::Value getManifest(const Uptane::EcuSerial& ecu_serial) const { - Uptane::Target installed_target = getCurrent(); - Json::Value installed_image; - installed_image["filepath"] = installed_target.filename(); - installed_image["fileinfo"]["length"] = Json::UInt64(installed_target.length()); - installed_image["fileinfo"]["hashes"]["sha256"] = installed_target.sha256Hash(); - - Json::Value unsigned_ecu_version; - unsigned_ecu_version["attacks_detected"] = ""; - unsigned_ecu_version["installed_image"] = installed_image; - unsigned_ecu_version["ecu_serial"] = ecu_serial.ToString(); - unsigned_ecu_version["previous_timeserver_time"] = "1970-01-01T00:00:00Z"; - unsigned_ecu_version["timeserver_time"] = "1970-01-01T00:00:00Z"; - return unsigned_ecu_version; - } - - protected: - 
PackageConfig config; - std::shared_ptr storage_; - std::shared_ptr bootloader_; - std::shared_ptr http_; -}; -#endif // PACKAGEMANAGERINTERFACE_H_ diff --git a/src/libaktualizr/primary/CMakeLists.txt b/src/libaktualizr/primary/CMakeLists.txt index a62a76c9f0..7a7073afe0 100644 --- a/src/libaktualizr/primary/CMakeLists.txt +++ b/src/libaktualizr/primary/CMakeLists.txt @@ -1,56 +1,113 @@ set(SOURCES aktualizr.cc aktualizr_helpers.cc - initializer.cc + provisioner.cc reportqueue.cc + secondary_provider.cc sotauptaneclient.cc) -set(HEADERS secondary_config.h - aktualizr.h - aktualizr_helpers.h - events.h - initializer.h +set(HEADERS aktualizr_helpers.h + provisioner.h reportqueue.h - results.h + secondary_config.h + secondary_provider_builder.h sotauptaneclient.h) - add_library(primary OBJECT ${SOURCES}) -add_aktualizr_test(NAME aktualizr SOURCES aktualizr_test.cc PROJECT_WORKING_DIRECTORY ARGS ${PROJECT_BINARY_DIR}/uptane_repos LIBRARIES uptane_generator_lib) +add_library(provisioner_test_utils STATIC provisioner_test_utils.cc) +aktualizr_source_file_checks(provisioner_test_utils.cc provisioner_test_utils.h) + + +add_aktualizr_test(NAME aktualizr + SOURCES aktualizr_test.cc + PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_BINARY_DIR}/uptane_repos + LIBRARIES uptane_generator_lib virtual_secondary) add_dependencies(t_aktualizr uptane_repo_full_no_correlation_id) -target_link_libraries(t_aktualizr virtual_secondary) + +add_aktualizr_test(NAME reregistration + SOURCES reregistration_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib virtual_secondary) if (BUILD_OSTREE) - add_aktualizr_test(NAME aktualizr_fullostree SOURCES aktualizr_fullostree_test.cc PROJECT_WORKING_DIRECTORY ARGS $ ${PROJECT_BINARY_DIR}/ostree_repo) + add_aktualizr_test(NAME aktualizr_fullostree + SOURCES aktualizr_fullostree_test.cc + PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_BINARY_DIR}/ostree_repo + LIBRARIES uptane_generator_lib virtual_secondary) 
set_target_properties(t_aktualizr_fullostree PROPERTIES LINK_FLAGS -Wl,--export-dynamic) add_dependencies(t_aktualizr_fullostree uptane-generator make_ostree_sysroot) - target_link_libraries(t_aktualizr_fullostree virtual_secondary) - add_aktualizr_test(NAME download_nonostree SOURCES download_nonostree_test.cc PROJECT_WORKING_DIRECTORY ARGS $ ${PROJECT_BINARY_DIR}/ostree_repo) + + add_aktualizr_test(NAME download_nonostree + SOURCES download_nonostree_test.cc + PROJECT_WORKING_DIRECTORY + ARGS $ ${PROJECT_BINARY_DIR}/ostree_repo + LIBRARIES virtual_secondary) add_dependencies(t_download_nonostree uptane-generator make_ostree_sysroot) - set_tests_properties(test_download_nonostree PROPERTIES - LABELS "noptest") - target_link_libraries(t_download_nonostree virtual_secondary) + set_tests_properties(test_download_nonostree PROPERTIES LABELS "noptest") + + add_aktualizr_test(NAME aktualizr_lite + SOURCES aktualizr_lite_test.cc + LIBRARIES uptane_generator_lib + PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_BINARY_DIR}/ostree_repo) + set_target_properties(t_aktualizr_lite PROPERTIES LINK_FLAGS -Wl,--export-dynamic) + add_dependencies(t_aktualizr_lite make_ostree_sysroot) else (BUILD_OSTREE) - aktualizr_source_file_checks(aktualizr_fullostree_test.cc download_nonostree_test.cc) + aktualizr_source_file_checks(aktualizr_fullostree_test.cc download_nonostree_test.cc aktualizr_lite_test.cc) endif (BUILD_OSTREE) -add_aktualizr_test(NAME reportqueue SOURCES reportqueue_test.cc PROJECT_WORKING_DIRECTORY LIBRARIES PUBLIC uptane_generator_lib) -add_aktualizr_test(NAME empty_targets SOURCES empty_targets_test.cc PROJECT_WORKING_DIRECTORY - ARGS "$" LIBRARIES uptane_generator_lib) -target_link_libraries(t_empty_targets virtual_secondary) -add_aktualizr_test(NAME custom_url SOURCES custom_url_test.cc PROJECT_WORKING_DIRECTORY - ARGS "$" LIBRARIES uptane_generator_lib) -target_link_libraries(t_custom_url virtual_secondary) -add_aktualizr_test(NAME target_mismatch SOURCES 
target_mismatch_test.cc PROJECT_WORKING_DIRECTORY - ARGS "$" LIBRARIES uptane_generator_lib) -target_link_libraries(t_target_mismatch virtual_secondary) - -add_aktualizr_test(NAME device_cred_prov SOURCES device_cred_prov_test.cc PROJECT_WORKING_DIRECTORY LIBRARIES PUBLIC uptane_generator_lib) +add_aktualizr_test(NAME provisioner + SOURCES provisioner_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES PUBLIC uptane_generator_lib provisioner_test_utils) + +add_aktualizr_test(NAME reportqueue + SOURCES reportqueue_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES PUBLIC uptane_generator_lib) + +add_aktualizr_test(NAME empty_targets + SOURCES empty_targets_test.cc + PROJECT_WORKING_DIRECTORY + ARGS "$" + LIBRARIES uptane_generator_lib virtual_secondary) + +add_aktualizr_test(NAME custom_url SOURCES custom_url_test.cc + PROJECT_WORKING_DIRECTORY + ARGS "$" + LIBRARIES uptane_generator_lib virtual_secondary) + +add_aktualizr_test(NAME target_mismatch + SOURCES target_mismatch_test.cc + PROJECT_WORKING_DIRECTORY + ARGS "$" + LIBRARIES uptane_generator_lib virtual_secondary) + +add_aktualizr_test(NAME metadata_fetch + SOURCES metadata_fetch_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib virtual_secondary) + +add_aktualizr_test(NAME metadata_expiration + SOURCES metadata_expiration_test.cc + PROJECT_WORKING_DIRECTORY + ARGS "$" + LIBRARIES uptane_generator_lib virtual_secondary) + +add_aktualizr_test(NAME device_cred_prov + SOURCES device_cred_prov_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib provisioner_test_utils) set_tests_properties(test_device_cred_prov PROPERTIES LABELS "crypto") -add_aktualizr_test(NAME uptane_key SOURCES uptane_key_test.cc PROJECT_WORKING_DIRECTORY LIBRARIES uptane_generator_lib) + +add_aktualizr_test(NAME uptane_key + SOURCES uptane_key_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib virtual_secondary) set_tests_properties(test_uptane_key PROPERTIES LABELS "crypto") 
-target_include_directories(t_uptane_key PUBLIC ${PROJECT_SOURCE_DIR}/src/virtual_secondary) -target_link_libraries(t_uptane_key virtual_secondary) + aktualizr_source_file_checks(${SOURCES} ${HEADERS} ${TEST_SOURCES}) diff --git a/src/libaktualizr/primary/aktualizr.cc b/src/libaktualizr/primary/aktualizr.cc index b978cdb111..8a0a5e9863 100644 --- a/src/libaktualizr/primary/aktualizr.cc +++ b/src/libaktualizr/primary/aktualizr.cc @@ -1,73 +1,66 @@ -#include "aktualizr.h" - #include +#include -#include -#include #include -#include "primary/events.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/events.h" +#include "primary/sotauptaneclient.h" +#include "utilities/apiqueue.h" #include "utilities/timer.h" using std::make_shared; +using std::move; using std::shared_ptr; -Aktualizr::Aktualizr(Config &config) : config_(config) { - systemSetup(); - sig_ = make_shared)>>(); - storage_ = INvStorage::newStorage(config_.storage); - storage_->importData(config_.import); - uptane_client_ = SotaUptaneClient::newDefaultClient(config_, storage_, sig_); -} - -Aktualizr::Aktualizr(Config &config, std::shared_ptr storage_in, std::shared_ptr http_in) - : config_(config) { - systemSetup(); - sig_ = make_shared)>>(); - storage_ = std::move(storage_in); - std::shared_ptr bootloader_in = std::make_shared(config_.bootloader, *storage_); - std::shared_ptr report_queue_in = std::make_shared(config_, http_in); - - uptane_client_ = std::make_shared(config_, storage_, http_in, bootloader_in, report_queue_in, sig_); -} +Aktualizr::Aktualizr(const Config &config) + : Aktualizr(config, INvStorage::newStorage(config.storage), std::make_shared()) {} -void Aktualizr::systemSetup() { +Aktualizr::Aktualizr(Config config, std::shared_ptr storage_in, + const std::shared_ptr &http_in) + : config_{move(config)}, sig_{new event::Channel()}, api_queue_{new api::CommandQueue()} { if (sodium_init() == -1) { // Note that sodium_init doesn't require a matching 'sodium_deinit' throw 
std::runtime_error("Unable to initialize libsodium"); } - LOG_TRACE << "Seeding random number generator from /dev/urandom..."; - Timer timer; - unsigned int seed; - std::ifstream urandom("/dev/urandom", std::ios::in | std::ios::binary); - urandom.read(reinterpret_cast(&seed), sizeof(seed)); - urandom.close(); - std::srand(seed); // seeds pseudo random generator with random number - LOG_TRACE << "... seeding complete in " << timer; + storage_ = move(storage_in); + storage_->importData(config_.import); + + uptane_client_ = std::make_shared(config_, storage_, http_in, sig_); } +Aktualizr::~Aktualizr() { api_queue_.reset(nullptr); } + void Aktualizr::Initialize() { uptane_client_->initialize(); - api_queue_.run(); + api_queue_->run(); } bool Aktualizr::UptaneCycle() { result::UpdateCheck update_result = CheckUpdates().get(); if (update_result.updates.empty()) { + if (update_result.status == result::UpdateStatus::kError) { + // If the metadata verification failed, inform the backend immediately. + SendManifest().get(); + } return true; } result::Download download_result = Download(update_result.updates).get(); if (download_result.status != result::DownloadStatus::kSuccess || download_result.updates.empty()) { + if (download_result.status != result::DownloadStatus::kNothingToDownload) { + // If the download failed, inform the backend immediately. 
+ SendManifest().get(); + } return true; } Install(download_result.updates).get(); if (uptane_client_->isInstallCompletionRequired()) { - // If there are some pending updates then effectively either reboot (ostree) or aktualizr restart (fake pack mngr) + // If there are some pending updates then effectively either reboot (OSTree) or aktualizr restart (fake pack mngr) // is required to apply the update(s) - LOG_INFO << "About to exit aktualizr so the pending updates can be applied after reboot"; + LOG_INFO << "Exiting aktualizr so that pending updates can be applied after reboot"; return false; } @@ -81,13 +74,22 @@ bool Aktualizr::UptaneCycle() { } std::future Aktualizr::RunForever() { - std::future future = std::async(std::launch::async, [&]() { - SendDeviceData().get(); - + std::future future = std::async(std::launch::async, [this]() { std::unique_lock l(exit_cond_.m); + bool have_sent_device_data = false; while (true) { - if (!UptaneCycle()) { - break; + try { + if (!have_sent_device_data) { + // Can throw SotaUptaneClient::ProvisioningFailed + SendDeviceData().get(); + have_sent_device_data = true; + } + + if (!UptaneCycle()) { + break; + } + } catch (SotaUptaneClient::ProvisioningFailed &e) { + LOG_DEBUG << "Not provisioned yet:" << e.what(); } if (exit_cond_.cv.wait_for(l, std::chrono::seconds(config_.uptane.polling_sec), @@ -108,13 +110,24 @@ void Aktualizr::Shutdown() { exit_cond_.cv.notify_all(); } -void Aktualizr::AddSecondary(const std::shared_ptr &secondary) { - uptane_client_->addNewSecondary(secondary); +void Aktualizr::AddSecondary(const std::shared_ptr &secondary) { + uptane_client_->addSecondary(secondary); +} + +void Aktualizr::SetSecondaryData(const Uptane::EcuSerial &ecu, const std::string &data) { + storage_->saveSecondaryData(ecu, data); +} + +std::vector Aktualizr::GetSecondaries() const { + std::vector info; + storage_->loadSecondariesInfo(&info); + + return info; } std::future Aktualizr::CampaignCheck() { std::function task([this] { return 
uptane_client_->campaignCheck(); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } std::future Aktualizr::CampaignControl(const std::string &campaign_id, campaign::Cmd cmd) { @@ -133,37 +146,42 @@ std::future Aktualizr::CampaignControl(const std::string &campaign_id, cam break; } }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } +void Aktualizr::SetCustomHardwareInfo(Json::Value hwinfo) { uptane_client_->setCustomHardwareInfo(move(hwinfo)); } std::future Aktualizr::SendDeviceData() { std::function task([this] { uptane_client_->sendDeviceData(); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } std::future Aktualizr::CheckUpdates() { std::function task([this] { return uptane_client_->fetchMeta(); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } std::future Aktualizr::Download(const std::vector &updates) { std::function task( [this, updates](const api::FlowControlToken *token) { return uptane_client_->downloadImages(updates, token); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } std::future Aktualizr::Install(const std::vector &updates) { std::function task([this, updates] { return uptane_client_->uptaneInstall(updates); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); +} + +bool Aktualizr::SetInstallationRawReport(const std::string &custom_raw_report) { + return storage_->storeDeviceInstallationRawReport(custom_raw_report); } std::future Aktualizr::SendManifest(const Json::Value &custom) { std::function task([this, custom]() { return uptane_client_->putManifest(custom); }); - return api_queue_.enqueue(task); + return api_queue_->enqueue(move(task)); } result::Pause Aktualizr::Pause() { - if (api_queue_.pause(true)) { + if (api_queue_->pause(true)) { uptane_client_->reportPause(); return result::PauseStatus::kSuccess; } else { @@ -172,7 +190,7 @@ result::Pause 
Aktualizr::Pause() { } result::Pause Aktualizr::Resume() { - if (api_queue_.pause(false)) { + if (api_queue_->pause(false)) { uptane_client_->reportResume(); return result::PauseStatus::kSuccess; } else { @@ -180,7 +198,7 @@ result::Pause Aktualizr::Resume() { } } -void Aktualizr::Abort() { api_queue_.abort(); } +void Aktualizr::Abort() { api_queue_->abort(); } boost::signals2::connection Aktualizr::SetSignalHandler( const std::function)> &handler) { @@ -192,7 +210,7 @@ Aktualizr::InstallationLog Aktualizr::GetInstallationLog() { EcuSerials serials; if (!storage_->loadEcuSerials(&serials)) { - throw std::runtime_error("Could not load ecu serials"); + throw std::runtime_error("Could not load ECU serials"); } ilog.reserve(serials.size()); @@ -203,20 +221,16 @@ Aktualizr::InstallationLog Aktualizr::GetInstallationLog() { std::vector log; storage_->loadInstallationLog(serial.ToString(), &log, true); - ilog.emplace_back(Aktualizr::InstallationLogEntry{serial, std::move(log)}); + ilog.emplace_back(Aktualizr::InstallationLogEntry{serial, move(log)}); } return ilog; } -std::vector Aktualizr::GetStoredTargets() { return storage_->getTargetFiles(); } +std::vector Aktualizr::GetStoredTargets() { return uptane_client_->getStoredTargets(); } -void Aktualizr::DeleteStoredTarget(const Uptane::Target &target) { storage_->removeTargetFile(target.filename()); } +void Aktualizr::DeleteStoredTarget(const Uptane::Target &target) { uptane_client_->deleteStoredTarget(target); } -std::unique_ptr Aktualizr::OpenStoredTarget(const Uptane::Target &target) { - auto handle = storage_->openTargetFile(target); - if (handle->isPartial()) { - throw std::runtime_error("Target was partially downloaded"); - } - return handle; +std::ifstream Aktualizr::OpenStoredTarget(const Uptane::Target &target) { + return uptane_client_->openStoredTarget(target); } diff --git a/src/libaktualizr/primary/aktualizr.h b/src/libaktualizr/primary/aktualizr.h deleted file mode 100644 index 1c9bf2aa34..0000000000 --- 
a/src/libaktualizr/primary/aktualizr.h +++ /dev/null @@ -1,217 +0,0 @@ -#ifndef AKTUALIZR_H_ -#define AKTUALIZR_H_ - -#include -#include - -#include - -#include "config/config.h" -#include "primary/events.h" -#include "sotauptaneclient.h" -#include "storage/invstorage.h" -#include "uptane/secondaryinterface.h" -#include "utilities/apiqueue.h" - -/** - * This class provides the main APIs necessary for launching and controlling - * libaktualizr. - */ -class Aktualizr { - public: - /** Aktualizr requires a configuration object. Examples can be found in the - * config directory. */ - explicit Aktualizr(Config& config); - Aktualizr(const Aktualizr&) = delete; - Aktualizr& operator=(const Aktualizr&) = delete; - - /** - * Initialize aktualizr. Any secondaries should be added before making this - * call. This will provision with the server if required. This must be called - * before using any other aktualizr functions except AddSecondary. - */ - void Initialize(); - - /** - * Asynchronously run aktualizr indefinitely until Shutdown is called. - * @return Empty std::future object - */ - std::future RunForever(); - - /** - * Shuts down currently running `RunForever()` method - */ - void Shutdown(); - - /** - * Check for campaigns. - * Campaigns are a concept outside of Uptane, and allow for user approval of - * updates before the contents of the update are known. - * @return std::future object with data about available campaigns. - */ - std::future CampaignCheck(); - - /** - * Act on campaign: accept, decline or postpone. - * Accepted campaign will be removed from the campaign list but no guarantee - * is made for declined or postponed items. Applications are responsible for - * tracking their state but this method will notify the server for device - * state monitoring purposes. - * @param campaign_id Campaign ID as provided by CampaignCheck. 
- * @param cmd action to apply on the campaign: accept, decline or postpone - * @return Empty std::future object - */ - std::future CampaignControl(const std::string& campaign_id, campaign::Cmd cmd); - - /** - * Send local device data to the server. - * This includes network status, installed packages, hardware etc. - * @return Empty std::future object - */ - std::future SendDeviceData(); - - /** - * Fetch Uptane metadata and check for updates. - * This collects a client manifest, PUTs it to the director, updates the - * Uptane metadata (including root and targets), and then checks the metadata - * for target updates. - * @return Information about available updates. - */ - std::future CheckUpdates(); - - /** - * Download targets. - * @param updates Vector of targets to download as provided by CheckUpdates. - * @return std::future object with information about download results. - */ - std::future Download(const std::vector& updates); - - /** - * Get log of installations. The log is indexed for every ECU and contains - * every change of versions ordered by time. It may contain duplicates in - * case of rollbacks. - * @return installation log - */ - struct InstallationLogEntry { - Uptane::EcuSerial ecu; - std::vector installs; - }; - using InstallationLog = std::vector; - InstallationLog GetInstallationLog(); - - /** - * Get list of targets currently in storage. This is intended to be used with - * DeleteStoredTarget and targets are not guaranteed to be verified and - * up-to-date with current metadata. - * @return std::vector of target objects - */ - std::vector GetStoredTargets(); - - /** - * Delete a stored target from storage. This only affects storage of the - * actual binary data and does not preclude a re-download if a target matches - * current metadata. 
- * @param target Target object matching the desired target in the storage - * @return true if successful - */ - void DeleteStoredTarget(const Uptane::Target& target); - - /** - * Get target downloaded in Download call. Returned target is guaranteed to be verified and up-to-date - * according to the Uptane metadata downloaded in CheckUpdates call. - * @param target Target object matching the desired target in the storage. - * @return Handle to the stored binary. nullptr if none is found. - */ - std::unique_ptr OpenStoredTarget(const Uptane::Target& target); - - /** - * Install targets. - * @param updates Vector of targets to install as provided by CheckUpdates or - * Download. - * @return std::future object with information about installation results. - */ - std::future Install(const std::vector& updates); - - /** - * Send installation report to the backend. - * - * Note that the device manifest is also sent as a part of CheckUpdates and - * SendDeviceData calls, as well as after a reboot if it was initiated - * by Aktualizr as a part of an installation process. - * All these manifests will not include the custom data provided in this call. - * - * @param custom Project-specific data to put in the custom field of Uptane manifest - * @return std::future object with manifest update result (true on success). - */ - std::future SendManifest(const Json::Value& custom = Json::nullValue); - - /** - * Pause the library operations. - * In progress target downloads will be paused and API calls will be deferred. - * - * @return Information about pause results. - */ - result::Pause Pause(); - - /** - * Resume the library operations. - * Target downloads will resume and API calls issued during the pause will - * execute in fifo order. - * - * @return Information about resume results. - */ - result::Pause Resume(); - - /** - * Aborts the currently running command, if it can be aborted, or waits for it - * to finish; then removes all other queued calls. 
- * This doesn't reset the `Paused` state, i.e. if the queue was previously - * paused, it will remain paused, but with an emptied queue. - * The call is blocking. - */ - void Abort(); - - /** - * Synchronously run an uptane cycle: check for updates, download any new - * targets, install them, and send a manifest back to the server. - * - * @return `false`, if the restart is required to continue, `true` otherwise - */ - bool UptaneCycle(); - - /** - * Add new secondary to aktualizr. Must be called before Initialize. - * @param secondary An object to perform installation on a secondary ECU. - */ - void AddSecondary(const std::shared_ptr& secondary); - - /** - * Provide a function to receive event notifications. - * @param handler a function that can receive event objects. - * @return a signal connection object, which can be disconnected if desired. - */ - boost::signals2::connection SetSignalHandler(const std::function)>& handler); - - private: - Config config_; - - protected: - Aktualizr(Config& config, std::shared_ptr storage_in, std::shared_ptr http_in); - - std::shared_ptr uptane_client_; - - private: - static void systemSetup(); - - struct { - std::mutex m; - std::condition_variable cv; - bool flag = false; - } exit_cond_; - - std::shared_ptr storage_; - std::shared_ptr sig_; - api::CommandQueue api_queue_; -}; - -#endif // AKTUALIZR_H_ diff --git a/src/libaktualizr/primary/aktualizr_fullostree_test.cc b/src/libaktualizr/primary/aktualizr_fullostree_test.cc index a0278fa0a4..3dfabc3105 100644 --- a/src/libaktualizr/primary/aktualizr_fullostree_test.cc +++ b/src/libaktualizr/primary/aktualizr_fullostree_test.cc @@ -7,16 +7,15 @@ #include -#include "uptane_test_common.h" - -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "package_manager/ostreemanager.h" -#include "primary/aktualizr.h" #include "storage/sqlstorage.h" #include "test_utils.h" +#include "uptane_repo.h" +#include 
"uptane_test_common.h" -boost::filesystem::path uptane_generator_path; static std::string server = "http://127.0.0.1:"; static std::string treehub_server = "http://127.0.0.1:"; static boost::filesystem::path sysroot; @@ -48,7 +47,7 @@ extern "C" const char *ostree_deployment_get_csum(OstreeDeployment *self) { TEST(Aktualizr, FullOstreeUpdate) { TemporaryDirectory temp_dir; Config conf = UptaneTestCommon::makeTestConfig(temp_dir, server); - conf.pacman.type = PackageManager::kOstree; + conf.pacman.type = PACKAGE_MANAGER_OSTREE; conf.pacman.sysroot = sysroot.string(); conf.pacman.ostree_server = treehub_server; conf.pacman.os = "dummy-os"; @@ -94,15 +93,15 @@ TEST(Aktualizr, FullOstreeUpdate) { // check new version const auto target = aktualizr.uptane_client()->package_manager_->getCurrent(); EXPECT_EQ(target.sha256Hash(), new_rev); - // TODO: verify the target. It doesn't work because + // TODO(OTA-3659): verify the target. It doesn't work because // ostree_repo_list_commit_objects_starting_with() doesn't find the commit. // The already mocked functions are not enough to do this; it seems the - // commit is not written with the correct hash. See OTA-3659. + // commit is not written with the correct hash. // Verify a bogus target is not present. 
Uptane::EcuMap primary_ecu{{Uptane::EcuSerial(conf.provision.primary_ecu_serial), Uptane::HardwareIdentifier(conf.provision.primary_ecu_hardware_id)}}; - Uptane::Target target_bad("some-pkg", primary_ecu, {Uptane::Hash(Uptane::Hash::Type::kSha256, "hash-bad")}, 4, ""); + Uptane::Target target_bad("some-pkg", primary_ecu, {Hash(Hash::Type::kSha256, "hash-bad")}, 4, ""); EXPECT_EQ(aktualizr.uptane_client()->package_manager_->verifyTarget(target_bad), TargetStatus::kNotFound); } } @@ -113,12 +112,10 @@ int main(int argc, char **argv) { logger_init(); - if (argc != 3) { - std::cerr << "Error: " << argv[0] << " requires the path to the uptane-generator utility " - << "and an OSTree sysroot\n"; + if (argc != 2) { + std::cerr << "Error: " << argv[0] << " requires the path to an OSTree sysroot\n"; return EXIT_FAILURE; } - uptane_generator_path = argv[1]; Process ostree("ostree"); @@ -126,7 +123,7 @@ int main(int argc, char **argv) { TemporaryDirectory temp_sysroot; sysroot = temp_sysroot / "sysroot"; // uses cp, as boost doesn't like to copy bad symlinks - int res = system((std::string("cp -r ") + argv[2] + std::string(" ") + sysroot.string()).c_str()); + int res = system((std::string("cp -r ") + argv[1] + std::string(" ") + sysroot.string()).c_str()); if (res != 0) { return -1; } @@ -160,14 +157,13 @@ int main(int argc, char **argv) { boost::trim_if(new_rev, boost::is_any_of(" \t\r\n")); LOG_INFO << "DEST: " << new_rev; - Process uptane_gen(uptane_generator_path.string()); - uptane_gen.run({"generate", "--path", meta_dir.PathString(), "--correlationid", "abc123"}); - uptane_gen.run({"image", "--path", meta_dir.PathString(), "--targetname", "update_1.0", "--targetsha256", new_rev, - "--targetlength", "0", "--targetformat", "OSTREE", "--hwid", "primary_hw"}); - uptane_gen.run({"addtarget", "--path", meta_dir.PathString(), "--targetname", "update_1.0", "--hwid", "primary_hw", - "--serial", "CA:FE:A6:D2:84:9D"}); - uptane_gen.run({"signtargets", "--path", 
meta_dir.PathString(), "--correlationid", "abc123"}); - LOG_INFO << uptane_gen.lastStdOut(); + const std::string hwid{"primary_hw"}; + const std::string target_name{"update_1.0"}; + UptaneRepo uptane_repo{meta_dir.PathString(), "", "abc123"}; + uptane_repo.generateRepo(KeyType::kED25519); + uptane_repo.addCustomImage(target_name, Hash(Hash::Type::kSha256, new_rev), 0, hwid); + uptane_repo.addTarget(target_name, hwid, "CA:FE:A6:D2:84:9D"); + uptane_repo.signTargets(); return RUN_ALL_TESTS(); } diff --git a/src/libaktualizr/primary/aktualizr_helpers.cc b/src/libaktualizr/primary/aktualizr_helpers.cc index 0697f33d64..cb18212435 100644 --- a/src/libaktualizr/primary/aktualizr_helpers.cc +++ b/src/libaktualizr/primary/aktualizr_helpers.cc @@ -1,7 +1,13 @@ -#include - #include "aktualizr_helpers.h" +#include // for find_if +#include // for size_t +#include // for operator==, string +#include // for vector, vector<>::reference, _Bi... +#include "libaktualizr/aktualizr.h" // for Aktualizr, Aktualizr::Installati... +#include "libaktualizr/events.h" // for AllInstallsComplete, BaseEvent +#include "libaktualizr/types.h" // for Target + void targets_autoclean_cb(Aktualizr &aktualizr, const std::shared_ptr &event) { if (!event->isTypeOf()) { return; @@ -12,12 +18,12 @@ void targets_autoclean_cb(Aktualizr &aktualizr, const std::shared_ptr= 2 ? 
entry.installs.end() - 2 : entry.installs.begin(); for (auto it = start; it != entry.installs.end(); it++) { auto fit = std::find_if(installed_targets.begin(), installed_targets.end(), - [&it](const Uptane::Target &t2) { return it->sha256Hash() == t2.sha256Hash(); }); + [&it](const Uptane::Target &t2) { return it->filename() == t2.filename(); }); if (fit == installed_targets.end()) { continue; diff --git a/src/libaktualizr/primary/aktualizr_helpers.h b/src/libaktualizr/primary/aktualizr_helpers.h index 30e72b2143..9b099fbda8 100644 --- a/src/libaktualizr/primary/aktualizr_helpers.h +++ b/src/libaktualizr/primary/aktualizr_helpers.h @@ -2,7 +2,11 @@ #define AKTUALIZR_HELPERS_H_ #include -#include "aktualizr.h" + +class Aktualizr; +namespace event { +class BaseEvent; +} /* * Signal handler to remove old targets just after an installation completes diff --git a/src/libaktualizr/primary/aktualizr_lite_test.cc b/src/libaktualizr/primary/aktualizr_lite_test.cc new file mode 100644 index 0000000000..02449e1449 --- /dev/null +++ b/src/libaktualizr/primary/aktualizr_lite_test.cc @@ -0,0 +1,403 @@ +#include + +#include +#include +#include + +#include + +#include "http/httpclient.h" +#include "image_repo.h" +#include "libaktualizr/config.h" +#include "logging/logging.h" +#include "package_manager/ostreemanager.h" +#include "storage/sqlstorage.h" +#include "test_utils.h" +#include "uptane/fetcher.h" +#include "uptane/imagerepository.h" + +class TufRepoMock { + public: + TufRepoMock(const boost::filesystem::path& root_dir, std::string expires = "", + std::string correlation_id = "corellatio-id") + : repo_{root_dir, expires, correlation_id}, + port_{TestUtils::getFreePort()}, + url_{"http://localhost:" + port_}, + process_{"tests/fake_http_server/fake_test_server.py", port_, "-m", root_dir} { + repo_.generateRepo(KeyType::kED25519); + TestUtils::waitForServer(url_ + "/"); + } + ~TufRepoMock() { + process_.terminate(); + process_.wait_for(std::chrono::seconds(10)); + } + + 
public: + const std::string& url() { return url_; } + + Uptane::Target add_target(const std::string& target_name, const std::string& hash, const std::string& hardware_id) { + repo_.addCustomImage(target_name, Hash(Hash::Type::kSha256, hash), 0, hardware_id); + + Json::Value target; + target["length"] = 0; + target["hashes"]["sha256"] = hash; + target["custom"]["targetFormat"] = "OSTREE"; + return Uptane::Target(target_name, target); + } + + private: + ImageRepo repo_; + std::string port_; + std::string url_; + boost::process::child process_; +}; + +class Treehub { + public: + Treehub(const std::string& root_dir) + : root_dir_{root_dir}, + port_{TestUtils::getFreePort()}, + url_{"http://localhost:" + port_}, + process_{"tests/sota_tools/treehub_server.py", + std::string("-p"), + port_, + std::string("-d"), + root_dir, + std::string("-s0.5"), + std::string("--create"), + std::string("--system")} { + std::this_thread::sleep_for(std::chrono::seconds(2)); + TestUtils::waitForServer(url_ + "/"); + } + + ~Treehub() { + process_.terminate(); + process_.wait_for(std::chrono::seconds(10)); + } + + const std::string& url() { return url_; } + + std::string getRev() { + Process ostree_process{"ostree"}; + Process::Result result = ostree_process.run({"rev-parse", std::string("--repo"), root_dir_, "master"}); + if (std::get<0>(result) != 0) { + throw std::runtime_error("Failed to get the current ostree revision in Treehub: " + std::get<2>(result)); + } + auto res_rev = std::get<1>(result); + boost::trim_if(res_rev, boost::is_any_of(" \t\r\n")); + return res_rev; + } + + private: + const std::string root_dir_; + std::string port_; + std::string url_; + boost::process::child process_; +}; + +class ComposeAppPackManMock : public OstreeManager { + public: + ComposeAppPackManMock(const PackageConfig& pconfig, const BootloaderConfig& bconfig, + const std::shared_ptr& storage, const std::shared_ptr& http) + : OstreeManager(pconfig, bconfig, storage, http) { + sysroot_ = 
LoadSysroot(pconfig.sysroot); + } + + std::string getCurrentHash() const override { + g_autoptr(GPtrArray) deployments = nullptr; + OstreeDeployment* deployment = nullptr; + + deployments = ostree_sysroot_get_deployments(sysroot_.get()); + if (deployments != nullptr && deployments->len > 0) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + deployment = static_cast(deployments->pdata[0]); + } + auto hash = ostree_deployment_get_csum(deployment); + return hash; + } + + data::InstallationResult finalizeInstall(const Uptane::Target& target) override { + auto cur_deployed_hash = getCurrentHash(); + + if (target.sha256Hash() == cur_deployed_hash) { + storage_->saveInstalledVersion("", target, InstalledVersionUpdateMode::kCurrent); + return data::InstallationResult{data::ResultCode::Numeric::kOk, "Update has been successfully applied"}; + } + return data::InstallationResult{data::ResultCode::Numeric::kInstallFailed, "Update has failed"}; + } + + private: + GObjectUniquePtr sysroot_; +}; + +class AkliteMock { + public: + AkliteMock(const AkliteMock&) = delete; + AkliteMock(const Config& config) + : storage_{INvStorage::newStorage(config.storage)}, + key_manager_{std_::make_unique(storage_, config.keymanagerConfig())}, + http_client_{std::make_shared()}, + package_manager_{ + std::make_shared(config.pacman, config.bootloader, storage_, http_client_)}, + fetcher_{std::make_shared(config, http_client_)} { + finalizeIfNeeded(); + } + ~AkliteMock() {} + + public: + data::InstallationResult update() { + image_repo_.updateMeta(*storage_, *fetcher_); + image_repo_.checkMetaOffline(*storage_); + + std::shared_ptr targets = image_repo_.getTargets(); + + if (0 == targets->targets.size()) { + return data::InstallationResult(data::ResultCode::Numeric::kAlreadyProcessed, + "Target has been already processed"); + } + + Uptane::Target target{targets->targets[0]}; + + if (TargetStatus::kNotFound != package_manager_->verifyTarget(target)) { + return 
data::InstallationResult(data::ResultCode::Numeric::kAlreadyProcessed, + "Target has been already processed"); + } + + if (isTargetCurrent(target)) { + return data::InstallationResult(data::ResultCode::Numeric::kAlreadyProcessed, + "Target has been already processed"); + } + + if (!package_manager_->fetchTarget(target, *fetcher_, *key_manager_, nullptr, nullptr)) { + return data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, "Failed to download Target"); + } + + if (TargetStatus::kGood != package_manager_->verifyTarget(target)) { + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, "Target is invalid"); + } + + const auto install_result = package_manager_->install(target); + if (install_result.result_code.num_code == data::ResultCode::Numeric::kNeedCompletion) { + storage_->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kPending); + } else if (install_result.result_code.num_code == data::ResultCode::Numeric::kOk) { + storage_->savePrimaryInstalledVersion(target, InstalledVersionUpdateMode::kCurrent); + } + + return install_result; + } + + bool isTargetCurrent(const Uptane::Target& target) { + const auto cur_target = package_manager_->getCurrent(); + + if (cur_target.type() != target.type() || target.type() != "OSTREE") { + LOG_ERROR << "Target formats mismatch: " << cur_target.type() << " != " << target.type(); + return false; + } + + if (cur_target.length() != target.length()) { + LOG_INFO << "Target names differ " << cur_target.filename() << " != " << target.filename(); + return false; + } + + if (cur_target.filename() != target.filename()) { + LOG_INFO << "Target names differ " << cur_target.filename() << " != " << target.filename(); + return false; + } + + if (cur_target.sha256Hash() != target.sha256Hash()) { + LOG_ERROR << "Target hashes differ " << cur_target.sha256Hash() << " != " << target.sha256Hash(); + return false; + } + + return true; + } + + data::InstallationResult finalizeIfNeeded() { + 
boost::optional pending_version; + storage_->loadInstalledVersions("", nullptr, &pending_version); + if (!!pending_version) { + return package_manager_->finalizeInstall(*pending_version); + } + return data::InstallationResult{data::ResultCode::Numeric::kAlreadyProcessed, "Already installed"}; + } + + private: + std::shared_ptr storage_; + std::unique_ptr key_manager_; + std::shared_ptr http_client_; + std::shared_ptr package_manager_; + std::shared_ptr fetcher_; + Uptane::ImageRepository image_repo_; + + FRIEND_TEST(AkliteTest, hashMismatchLogsTest); +}; + +class AkliteTest : public ::testing::Test { + public: + static std::string SysRootSrc; + + protected: + AkliteTest() + : tuf_repo_{test_dir_.Path() / "repo"}, + treehub_{(test_dir_.Path() / "treehub").string()}, + sysroot_path_{test_dir_.Path() / "sysroot"} { + const auto sysroot_cmd = std::string("cp -r ") + SysRootSrc + std::string(" ") + sysroot_path_.string(); + if (0 != system(sysroot_cmd.c_str())) { + throw std::runtime_error("Failed to copy a system rootfs"); + } + + conf_.uptane.repo_server = tufRepo().url() + "/repo"; + conf_.provision.primary_ecu_hardware_id = "primary_hw"; + conf_.pacman.type = PACKAGE_MANAGER_OSTREE; + conf_.pacman.sysroot = sysrootPath(); + conf_.pacman.ostree_server = treehub().url(); + conf_.pacman.os = "dummy-os"; + conf_.storage.path = test_dir_.Path(); + } + + TufRepoMock& tufRepo() { return tuf_repo_; } + Treehub& treehub() { return treehub_; } + boost::filesystem::path& sysrootPath() { return sysroot_path_; } + Config& conf() { return conf_; } + + static void corruptStoredMetadata(std::shared_ptr storage, const Uptane::Role& role) { + std::string metadata_stored; + EXPECT_TRUE(storage->loadNonRoot(&metadata_stored, Uptane::RepositoryType::Image(), role)); + logger_set_threshold(boost::log::trivial::error); + EXPECT_EQ('{', metadata_stored[0]); + metadata_stored[0] = '['; + storage->storeNonRoot(metadata_stored, Uptane::RepositoryType::Image(), role); + } + + private: + 
TemporaryDirectory test_dir_; + TufRepoMock tuf_repo_; + Treehub treehub_; + boost::filesystem::path sysroot_path_; + Config conf_; +}; + +std::string AkliteTest::SysRootSrc; + +/* + * Test that mimics aktualizr-lite + * + * It makes use of libaktualizr's components and makes API calls to them in the same way as aktualizr-lite + * would do during its regular update cycle. + */ +TEST_F(AkliteTest, ostreeUpdate) { + const auto target_to_install = tufRepo().add_target("target-01", treehub().getRev(), "primary_hw"); + { + AkliteMock aklite{conf()}; + const auto update_result = aklite.update(); + ASSERT_EQ(update_result.result_code.num_code, data::ResultCode::Numeric::kNeedCompletion); + } + // reboot emulation by destroying and creating a new AkliteMock instance + { + AkliteMock aklite{conf()}; + ASSERT_TRUE(aklite.isTargetCurrent(target_to_install)); + } +} + +/* + * Test that verifies if metadata files are being stored when there are changes and also not being stored + * when the files have not changed. 
+ */ +TEST_F(AkliteTest, timestampStoreLogsTest) { + AkliteMock aklite{conf()}; + std::string log_output; + tufRepo().add_target("target-01", treehub().getRev(), "primary_hw"); + + // On first update, metadata is expected to be stored + testing::internal::CaptureStdout(); + logger_set_threshold(boost::log::trivial::debug); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_NE(std::string::npos, log_output.find("Storing timestamp for image repo")); + EXPECT_NE(std::string::npos, log_output.find("Storing snapshot for image repo")); + EXPECT_NE(std::string::npos, log_output.find("Storing targets for image repo")); + + // If there were no changes, no metadata should be stored + testing::internal::CaptureStdout(); + logger_set_threshold(boost::log::trivial::debug); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_EQ(std::string::npos, log_output.find("Storing timestamp for image repo")); + EXPECT_EQ(std::string::npos, log_output.find("Storing snapshot for image repo")); + EXPECT_EQ(std::string::npos, log_output.find("Storing targets for image repo")); +} + +/* + * Test that verifies if hash verification failures for targets and snapshot have the expected severity: + * - Debug when a hash mismatch is expected, e.g., when snapshot hashes change inside the timestamp metadata; + * - Error when the hash mismatch is not expected + */ +TEST_F(AkliteTest, hashMismatchLogsTest) { + AkliteMock aklite{conf()}; + std::string log_output; + tufRepo().add_target("target-01", treehub().getRev(), "primary_hw"); + aklite.update(); + + // First, verify error output + logger_set_threshold(boost::log::trivial::error); + + // Corrupt stored targets metadata and verify that the expected error messages are generated + corruptStoredMetadata(aklite.storage_, Uptane::Role::Targets()); + testing::internal::CaptureStdout(); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_NE(std::string::npos, + 
log_output.find("Signature verification for Image repo Targets metadata failed: Hash metadata mismatch")); + EXPECT_NE(std::string::npos, log_output.find("Image repo Target verification failed: Hash metadata mismatch")); + + // Corrupt stored snapshot metadata and verify that the expected error message is generated + corruptStoredMetadata(aklite.storage_, Uptane::Role::Snapshot()); + testing::internal::CaptureStdout(); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_NE(std::string::npos, + log_output.find("Image repo Snapshot verification failed: Snapshot metadata hash verification failed")); + + // No metadata corruption: no errors should be generated + testing::internal::CaptureStdout(); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_TRUE(log_output.empty()); + + // Targets updated, no metadata corruption: no errors should be generated + tufRepo().add_target("target-02", treehub().getRev(), "primary_hw"); + testing::internal::CaptureStdout(); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_TRUE(log_output.empty()); + + // Verify debug output + logger_set_threshold(boost::log::trivial::debug); + + // Targets updated, no metadata corruption: verify expected debug messages + tufRepo().add_target("target-03", treehub().getRev(), "primary_hw"); + testing::internal::CaptureStdout(); + aklite.update(); + log_output = testing::internal::GetCapturedStdout(); + EXPECT_NE(std::string::npos, + log_output.find("Signature verification for Image repo Targets metadata failed: Hash metadata mismatch")); + EXPECT_NE(std::string::npos, log_output.find("Image repo Target verification failed: Hash metadata mismatch")); + EXPECT_NE(std::string::npos, + log_output.find("Image repo Snapshot verification failed: Snapshot metadata hash verification failed")); +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + + 
if (argc != 2) { + std::cerr << "Error: " << argv[0] << " requires the path to the test OSTree sysroot\n"; + return EXIT_FAILURE; + } + AkliteTest::SysRootSrc = argv[1]; + + return RUN_ALL_TESTS(); +} +#endif // __NO_MAIN__ diff --git a/src/libaktualizr/primary/aktualizr_test.cc b/src/libaktualizr/primary/aktualizr_test.cc index b0fd77ac49..98212b0417 100644 --- a/src/libaktualizr/primary/aktualizr_test.cc +++ b/src/libaktualizr/primary/aktualizr_test.cc @@ -9,11 +9,12 @@ #include #include "json/json.h" -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/config.h" +#include "libaktualizr/events.h" + #include "httpfake.h" -#include "primary/aktualizr.h" #include "primary/aktualizr_helpers.h" -#include "primary/events.h" #include "primary/sotauptaneclient.h" #include "uptane_test_common.h" #include "utilities/utils.h" @@ -25,7 +26,7 @@ boost::filesystem::path uptane_repos_dir; boost::filesystem::path fake_meta_dir; void verifyNothingInstalled(const Json::Value& manifest) { - // Verify nothing has installed for the primary. + // Verify nothing has installed for the Primary. EXPECT_EQ( manifest["ecu_version_manifests"]["CA:FE:A6:D2:84:9D"]["signed"]["custom"]["operation_result"]["id"].asString(), ""); @@ -39,28 +40,12 @@ void verifyNothingInstalled(const Json::Value& manifest) { ""); EXPECT_EQ(manifest["ecu_version_manifests"]["CA:FE:A6:D2:84:9D"]["signed"]["installed_image"]["filepath"].asString(), "unknown"); - // Verify nothing has installed for the secondary. + // Verify nothing has installed for the Secondary. 
EXPECT_EQ( manifest["ecu_version_manifests"]["secondary_ecu_serial"]["signed"]["installed_image"]["filepath"].asString(), "noimage"); } -static Primary::VirtualSecondaryConfig virtual_configuration(const boost::filesystem::path& client_dir) { - Primary::VirtualSecondaryConfig ecu_config; - - ecu_config.partial_verifying = false; - ecu_config.full_client_dir = client_dir; - ecu_config.ecu_serial = "ecuserial3"; - ecu_config.ecu_hardware_id = "hw_id3"; - ecu_config.ecu_private_key = "sec.priv"; - ecu_config.ecu_public_key = "sec.pub"; - ecu_config.firmware_path = client_dir / "firmware.txt"; - ecu_config.target_name_path = client_dir / "firmware_name.txt"; - ecu_config.metadata_path = client_dir / "secondary_metadata"; - - return ecu_config; -} - /* * Initialize -> UptaneCycle -> no updates -> no further action or events. */ @@ -86,7 +71,7 @@ TEST(Aktualizr, FullNoUpdates) { LOG_INFO << "Got " << event->variant; switch (ev_state.num_events) { case 0: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 0); EXPECT_EQ(targets_event->result.updates.size(), 0); @@ -94,7 +79,7 @@ TEST(Aktualizr, FullNoUpdates) { break; } case 1: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 0); EXPECT_EQ(targets_event->result.updates.size(), 0); @@ -107,7 +92,7 @@ TEST(Aktualizr, FullNoUpdates) { FAIL() << "Unexpected events!"; default: std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; - EXPECT_EQ(event->variant, ""); + ASSERT_EQ(event->variant, ""); } ++ev_state.num_events; }; @@ -127,44 +112,8 @@ TEST(Aktualizr, FullNoUpdates) { } /* - * Add secondaries via API - */ -TEST(Aktualizr, AddSecondary) { - TemporaryDirectory temp_dir; - auto http = 
std::make_shared(temp_dir.Path(), "noupdates", fake_meta_dir); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - - auto storage = INvStorage::newStorage(conf.storage); - UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); - - Primary::VirtualSecondaryConfig ecu_config = virtual_configuration(temp_dir.Path()); - - aktualizr.AddSecondary(std::make_shared(ecu_config)); - - aktualizr.Initialize(); - - EcuSerials serials; - storage->loadEcuSerials(&serials); - - std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "ecuserial3", "secondary_ecu_serial"}; - EXPECT_EQ(serials.size(), 3); - for (const auto& ecu : serials) { - auto found = std::find(expected_ecus.begin(), expected_ecus.end(), ecu.first.ToString()); - if (found != expected_ecus.end()) { - expected_ecus.erase(found); - } else { - FAIL() << "Unknown ecu: " << ecu.first.ToString(); - } - } - EXPECT_EQ(expected_ecus.size(), 0); - - ecu_config.ecu_serial = "ecuserial4"; - auto sec4 = std::make_shared(ecu_config); - EXPECT_THROW(aktualizr.AddSecondary(sec4), std::logic_error); -} - -/* - * Compute device installation failure code as concatenation of ECU failure codes. + * Compute device installation failure code as concatenation of ECU failure + * codes during installation. 
*/ TEST(Aktualizr, DeviceInstallationResult) { TemporaryDirectory temp_dir; @@ -172,28 +121,25 @@ TEST(Aktualizr, DeviceInstallationResult) { Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); auto storage = INvStorage::newStorage(conf.storage); - EcuSerials serials{ - {Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}, + {Uptane::EcuSerial("CA:FE:A6:D2:84:9D"), Uptane::HardwareIdentifier("primary_hw")}, + {Uptane::EcuSerial("secondary_ecu_serial"), Uptane::HardwareIdentifier("secondary_hw")}, {Uptane::EcuSerial("ecuserial3"), Uptane::HardwareIdentifier("hw_id3")}, }; storage->storeEcuSerials(serials); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); - - Primary::VirtualSecondaryConfig ecu_config = virtual_configuration(temp_dir.Path()); - + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); aktualizr.AddSecondary(std::make_shared(ecu_config)); - aktualizr.Initialize(); storage->saveEcuInstallationResult(Uptane::EcuSerial("ecuserial3"), data::InstallationResult()); - storage->saveEcuInstallationResult(Uptane::EcuSerial("primary"), data::InstallationResult()); - storage->saveEcuInstallationResult(Uptane::EcuSerial("primary"), + storage->saveEcuInstallationResult(Uptane::EcuSerial("CA:FE:A6:D2:84:9D"), data::InstallationResult()); + storage->saveEcuInstallationResult(Uptane::EcuSerial("CA:FE:A6:D2:84:9D"), data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, "")); data::InstallationResult result; - aktualizr.uptane_client()->computeDeviceInstallationResult(&result, "correlation_id"); + aktualizr.uptane_client()->computeDeviceInstallationResult(&result, nullptr); auto res_json = result.toJson(); EXPECT_EQ(res_json["code"].asString(), "primary_hw:INSTALL_FAILED"); EXPECT_EQ(res_json["success"], false); @@ -201,12 +147,62 @@ TEST(Aktualizr, DeviceInstallationResult) { storage->saveEcuInstallationResult( Uptane::EcuSerial("ecuserial3"), 
data::InstallationResult(data::ResultCode(data::ResultCode::Numeric::kInstallFailed, "SECOND_FAIL"), "")); - aktualizr.uptane_client()->computeDeviceInstallationResult(&result, "correlation_id"); + aktualizr.uptane_client()->computeDeviceInstallationResult(&result, nullptr); res_json = result.toJson(); EXPECT_EQ(res_json["code"].asString(), "primary_hw:INSTALL_FAILED|hw_id3:SECOND_FAIL"); EXPECT_EQ(res_json["success"], false); } +#ifdef FIU_ENABLE + +/* + * Compute device installation failure code as concatenation of ECU failure + * codes from sending metadata to Secondaries. + */ +TEST(Aktualizr, DeviceInstallationResultMetadata) { + TemporaryDirectory temp_dir; + // Use "hasupdates" to make sure Image repo Root gets fetched, despite that we + // won't use the default update. + auto http = std::make_shared(temp_dir.Path(), "hasupdates", fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "sec_serial1", "sec_hw1"); + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "sec_serial2", "sec_hw2"); + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "sec_serial3", "sec_hw3"); + + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + auto update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + + fault_injection_init(); + fiu_enable("secondary_putmetadata", 1, nullptr, 0); + + // Try updating two Secondaries; leave the third one alone. 
+ std::vector targets; + Json::Value target_json; + target_json["custom"]["targetFormat"] = "BINARY"; + target_json["custom"]["ecuIdentifiers"]["sec_serial1"]["hardwareId"] = "sec_hw1"; + target_json["custom"]["ecuIdentifiers"]["sec_serial3"]["hardwareId"] = "sec_hw3"; + targets.emplace_back(Uptane::Target("test", target_json)); + + data::InstallationResult result; + aktualizr.uptane_client()->sendMetadataToEcus(targets, &result, nullptr); + auto res_json = result.toJson(); + EXPECT_EQ(res_json["code"].asString(), "sec_hw1:VERIFICATION_FAILED|sec_hw3:VERIFICATION_FAILED"); + EXPECT_EQ(res_json["success"], false); + + fiu_disable("secondary_putmetadata"); + + // Retry after disabling fault injection to verify the test. + aktualizr.uptane_client()->sendMetadataToEcus(targets, &result, nullptr); + res_json = result.toJson(); + EXPECT_EQ(res_json["code"].asString(), "OK"); + EXPECT_EQ(res_json["success"], true); +} + +#endif // FIU_ENABLE + class HttpFakeEventCounter : public HttpFake { public: HttpFakeEventCounter(const boost::filesystem::path& test_dir_in, const boost::filesystem::path& meta_dir_in) @@ -238,8 +234,8 @@ class HttpFakeEventCounter : public HttpFake { }; /* - * Initialize -> UptaneCycle -> updates downloaded and installed for primary and - * secondary. + * Initialize -> UptaneCycle -> updates downloaded and installed for Primary and + * Secondary. 
*/ TEST(Aktualizr, FullWithUpdates) { TemporaryDirectory temp_dir; @@ -263,7 +259,7 @@ TEST(Aktualizr, FullWithUpdates) { LOG_INFO << "Got " << event->variant; switch (ev_state.num_events) { case 0: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 2); EXPECT_EQ(targets_event->result.updates.size(), 2u); @@ -274,7 +270,7 @@ TEST(Aktualizr, FullWithUpdates) { } case 1: case 2: { - EXPECT_EQ(event->variant, "DownloadTargetComplete"); + ASSERT_EQ(event->variant, "DownloadTargetComplete"); const auto download_event = dynamic_cast(event.get()); EXPECT_TRUE(download_event->update.filename() == "primary_firmware.txt" || download_event->update.filename() == "secondary_firmware.txt"); @@ -282,7 +278,7 @@ TEST(Aktualizr, FullWithUpdates) { break; } case 3: { - EXPECT_EQ(event->variant, "AllDownloadsComplete"); + ASSERT_EQ(event->variant, "AllDownloadsComplete"); const auto downloads_complete = dynamic_cast(event.get()); EXPECT_EQ(downloads_complete->result.updates.size(), 2); EXPECT_TRUE(downloads_complete->result.updates[0].filename() == "primary_firmware.txt" || @@ -295,35 +291,35 @@ TEST(Aktualizr, FullWithUpdates) { case 4: { // Primary always gets installed first. (Not a requirement, just how it // works at present.) - EXPECT_EQ(event->variant, "InstallStarted"); + ASSERT_EQ(event->variant, "InstallStarted"); const auto install_started = dynamic_cast(event.get()); EXPECT_EQ(install_started->serial.ToString(), "CA:FE:A6:D2:84:9D"); break; } case 5: { - // Primary should complete before secondary begins. (Again not a + // Primary should complete before Secondary begins. (Again not a // requirement per se.) 
- EXPECT_EQ(event->variant, "InstallTargetComplete"); + ASSERT_EQ(event->variant, "InstallTargetComplete"); const auto install_complete = dynamic_cast(event.get()); EXPECT_EQ(install_complete->serial.ToString(), "CA:FE:A6:D2:84:9D"); EXPECT_TRUE(install_complete->success); break; } case 6: { - EXPECT_EQ(event->variant, "InstallStarted"); + ASSERT_EQ(event->variant, "InstallStarted"); const auto install_started = dynamic_cast(event.get()); EXPECT_EQ(install_started->serial.ToString(), "secondary_ecu_serial"); break; } case 7: { - EXPECT_EQ(event->variant, "InstallTargetComplete"); + ASSERT_EQ(event->variant, "InstallTargetComplete"); const auto install_complete = dynamic_cast(event.get()); EXPECT_EQ(install_complete->serial.ToString(), "secondary_ecu_serial"); EXPECT_TRUE(install_complete->success); break; } case 8: { - EXPECT_EQ(event->variant, "AllInstallsComplete"); + ASSERT_EQ(event->variant, "AllInstallsComplete"); const auto installs_complete = dynamic_cast(event.get()); EXPECT_EQ(installs_complete->result.ecu_reports.size(), 2); EXPECT_EQ(installs_complete->result.ecu_reports[0].install_res.result_code.num_code, @@ -333,7 +329,7 @@ TEST(Aktualizr, FullWithUpdates) { break; } case 9: { - EXPECT_EQ(event->variant, "PutManifestComplete"); + ASSERT_EQ(event->variant, "PutManifestComplete"); const auto put_complete = dynamic_cast(event.get()); EXPECT_TRUE(put_complete->success); ev_state.promise.set_value(); @@ -344,7 +340,7 @@ TEST(Aktualizr, FullWithUpdates) { FAIL() << "Unexpected events!"; default: std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; - EXPECT_EQ(event->variant, ""); + ASSERT_EQ(event->variant, ""); } ++ev_state.num_events; }; @@ -359,6 +355,211 @@ TEST(Aktualizr, FullWithUpdates) { EXPECT_EQ(http->events_seen, 8); } +class HttpFakeSplit : public HttpFake { + public: + HttpFakeSplit(const boost::filesystem::path& test_dir_in, const boost::filesystem::path& meta_dir_in) + : HttpFake(test_dir_in, "hasupdates", 
meta_dir_in) {} + + HttpResponse handle_event(const std::string& url, const Json::Value& data) override { + (void)url; + for (const Json::Value& event : data) { + ++events_seen; + std::string event_type = event["eventType"]["id"].asString(); + if (event_type.find("Ecu") == 0) { + EXPECT_EQ(event["event"]["correlationId"], "id0"); + } + + std::cout << "got event #" << events_seen << ": " << event_type << "\n"; + switch (events_seen) { + case 1: + case 5: + EXPECT_TRUE(event_type == "EcuDownloadStarted"); + break; + case 2: + case 6: + EXPECT_TRUE(event_type == "EcuDownloadCompleted"); + break; + case 3: + case 7: + EXPECT_TRUE(event_type == "EcuInstallationStarted"); + break; + case 4: + case 8: + EXPECT_TRUE(event_type == "EcuInstallationCompleted"); + break; + default: + std::cout << "Unexpected event"; + EXPECT_EQ(0, 1); + break; + } + } + return HttpResponse("", 200, CURLE_OK, ""); + } + + unsigned int events_seen{0}; +}; + +/* + * Initialize -> CheckUpdates -> download and install one update -> download and + * install second update + * + * This is intended to cover the case where an implementer splits an update with + * multiple targets and downloads and/or installs them separately. This is + * supported as long as a check for updates isn't done in between. It was briefly + * broken by overzealously dropping Targets metadata and fixed in + * 99c7f7ef20da76a2d6eefd08be5529c36434b9a6. 
+ */ +TEST(Aktualizr, SplitUpdates) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + + struct { + size_t num_events{0}; + std::future future; + std::promise promise; + } ev_state; + ev_state.future = ev_state.promise.get_future(); + + auto f_cb = [&ev_state](const std::shared_ptr& event) { + if (event->isTypeOf()) { + return; + } + LOG_INFO << "Got " << event->variant; + switch (ev_state.num_events) { + case 0: { + ASSERT_EQ(event->variant, "UpdateCheckComplete"); + const auto targets_event = dynamic_cast(event.get()); + EXPECT_EQ(targets_event->result.ecus_count, 2); + EXPECT_EQ(targets_event->result.updates.size(), 2u); + EXPECT_EQ(targets_event->result.updates[0].filename(), "primary_firmware.txt"); + EXPECT_EQ(targets_event->result.updates[1].filename(), "secondary_firmware.txt"); + EXPECT_EQ(targets_event->result.status, result::UpdateStatus::kUpdatesAvailable); + break; + } + case 1: { + ASSERT_EQ(event->variant, "DownloadTargetComplete"); + const auto download_event = dynamic_cast(event.get()); + EXPECT_TRUE(download_event->update.filename() == "primary_firmware.txt"); + EXPECT_TRUE(download_event->success); + break; + } + case 2: { + ASSERT_EQ(event->variant, "AllDownloadsComplete"); + const auto downloads_complete = dynamic_cast(event.get()); + EXPECT_EQ(downloads_complete->result.updates.size(), 1); + EXPECT_TRUE(downloads_complete->result.updates[0].filename() == "primary_firmware.txt"); + EXPECT_EQ(downloads_complete->result.status, result::DownloadStatus::kSuccess); + break; + } + case 3: { + // Primary always gets installed first. (Not a requirement, just how it + // works at present.) 
+ ASSERT_EQ(event->variant, "InstallStarted"); + const auto install_started = dynamic_cast(event.get()); + EXPECT_EQ(install_started->serial.ToString(), "CA:FE:A6:D2:84:9D"); + break; + } + case 4: { + // Primary should complete before Secondary begins. (Again not a + // requirement per se.) + ASSERT_EQ(event->variant, "InstallTargetComplete"); + const auto install_complete = dynamic_cast(event.get()); + EXPECT_EQ(install_complete->serial.ToString(), "CA:FE:A6:D2:84:9D"); + EXPECT_TRUE(install_complete->success); + break; + } + case 5: { + ASSERT_EQ(event->variant, "AllInstallsComplete"); + const auto installs_complete = dynamic_cast(event.get()); + EXPECT_EQ(installs_complete->result.ecu_reports.size(), 1); + EXPECT_EQ(installs_complete->result.ecu_reports[0].install_res.result_code.num_code, + data::ResultCode::Numeric::kOk); + break; + } + case 6: { + ASSERT_EQ(event->variant, "DownloadTargetComplete"); + const auto download_event = dynamic_cast(event.get()); + EXPECT_TRUE(download_event->update.filename() == "secondary_firmware.txt"); + EXPECT_TRUE(download_event->success); + break; + } + case 7: { + ASSERT_EQ(event->variant, "AllDownloadsComplete"); + const auto downloads_complete = dynamic_cast(event.get()); + EXPECT_EQ(downloads_complete->result.updates.size(), 1); + EXPECT_TRUE(downloads_complete->result.updates[0].filename() == "secondary_firmware.txt"); + EXPECT_EQ(downloads_complete->result.status, result::DownloadStatus::kSuccess); + break; + } + case 8: { + ASSERT_EQ(event->variant, "InstallStarted"); + const auto install_started = dynamic_cast(event.get()); + EXPECT_EQ(install_started->serial.ToString(), "secondary_ecu_serial"); + break; + } + case 9: { + ASSERT_EQ(event->variant, "InstallTargetComplete"); + const auto install_complete = dynamic_cast(event.get()); + EXPECT_EQ(install_complete->serial.ToString(), "secondary_ecu_serial"); + EXPECT_TRUE(install_complete->success); + break; + } + case 10: { + ASSERT_EQ(event->variant, 
"AllInstallsComplete"); + const auto installs_complete = dynamic_cast(event.get()); + EXPECT_EQ(installs_complete->result.ecu_reports.size(), 1); + EXPECT_EQ(installs_complete->result.ecu_reports[0].install_res.result_code.num_code, + data::ResultCode::Numeric::kOk); + break; + } + case 11: { + ASSERT_EQ(event->variant, "UpdateCheckComplete"); + const auto targets_event = dynamic_cast(event.get()); + EXPECT_EQ(targets_event->result.ecus_count, 0); + EXPECT_EQ(targets_event->result.updates.size(), 0); + EXPECT_EQ(targets_event->result.status, result::UpdateStatus::kNoUpdatesAvailable); + ev_state.promise.set_value(); + break; + } + case 15: + // Don't let the test run indefinitely! + FAIL() << "Unexpected events!"; + default: + std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; + ASSERT_EQ(event->variant, ""); + } + ++ev_state.num_events; + }; + boost::signals2::connection conn = aktualizr.SetSignalHandler(f_cb); + + aktualizr.Initialize(); + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + ASSERT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + ASSERT_EQ(update_result.ecus_count, 2); + + // Try to install the updates in two separate calls. 
+ result::Download download_result = aktualizr.Download({update_result.updates[0]}).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + aktualizr.Install(download_result.updates); + + download_result = aktualizr.Download({update_result.updates[1]}).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + aktualizr.Install(download_result.updates); + + update_result = aktualizr.CheckUpdates().get(); + ASSERT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + + auto status = ev_state.future.wait_for(std::chrono::seconds(20)); + if (status != std::future_status::ready) { + FAIL() << "Timed out waiting for installation to complete."; + } + EXPECT_EQ(http->events_seen, 8); +} + class HttpFakePutCounter : public HttpFake { public: HttpFakePutCounter(const boost::filesystem::path& test_dir_in, const boost::filesystem::path& meta_dir_in) @@ -392,10 +593,10 @@ class HttpFakePutCounter : public HttpFake { }; /* - * Initialize -> UptaneCycle -> updates downloaded and installed for primary - * (after reboot) and secondary (aktualizr_test.cc) + * Initialize -> UptaneCycle -> updates downloaded and installed for Primary + * (after reboot) and Secondary (aktualizr_test.cc) * - * It simulates closely the OStree case which needs a reboot after applying an + * It simulates closely the OSTree case which needs a reboot after applying an * update, but uses `PackageManagerFake`. 
* * Checks actions: @@ -405,7 +606,7 @@ class HttpFakePutCounter : public HttpFake { * - [x] Send manifest * - [x] Update is not in pending state anymore after successful finalization * - * - [x] Install an update on the primary + * - [x] Install an update on the Primary * - [x] Set new version to pending status after an OSTree update trigger * - [x] Send EcuInstallationAppliedReport to server after an OSTree update trigger * - [x] Uptane check for updates and manifest sends are disabled while an installation @@ -433,9 +634,9 @@ TEST(Aktualizr, FullWithUpdatesNeedReboot) { // check that no manifest has been sent after the update application EXPECT_EQ(http->manifest_sends, 1); - EXPECT_EQ(http->count_event_with_type("EcuInstallationStarted"), 2); // two ecus have started - EXPECT_EQ(http->count_event_with_type("EcuInstallationApplied"), 1); // primary ecu has been applied - EXPECT_EQ(http->count_event_with_type("EcuInstallationCompleted"), 1); // secondary ecu has been updated + EXPECT_EQ(http->count_event_with_type("EcuInstallationStarted"), 2); // two ECUs have started + EXPECT_EQ(http->count_event_with_type("EcuInstallationApplied"), 1); // Primary ECU has been applied + EXPECT_EQ(http->count_event_with_type("EcuInstallationCompleted"), 1); // Secondary ECU has been updated { // second run: before reboot, re-use the storage @@ -468,14 +669,14 @@ TEST(Aktualizr, FullWithUpdatesNeedReboot) { result::UpdateCheck update_res = aktualizr.CheckUpdates().get(); EXPECT_EQ(update_res.status, result::UpdateStatus::kNoUpdatesAvailable); - // primary is installed, nothing pending + // Primary is installed, nothing pending boost::optional current_target; boost::optional pending_target; storage->loadPrimaryInstalledVersions(¤t_target, &pending_target); EXPECT_TRUE(!!current_target); EXPECT_FALSE(!!pending_target); - // secondary is installed, nothing pending + // Secondary is installed, nothing pending boost::optional sec_current_target; boost::optional sec_pending_target; 
storage->loadInstalledVersions("secondary_ecu_serial", &sec_current_target, &sec_pending_target); @@ -589,7 +790,7 @@ class EventHandler { * Initialize -> UptaneCycle -> download updates and install them -> * -> reboot emulated -> Initialize -> Fail installation finalization * - * Verifies whether the uptane client is not at pending state after installation finalization failure + * Verifies whether the Uptane client is not at pending state after installation finalization failure * * Checks actions: * @@ -636,12 +837,12 @@ TEST(Aktualizr, FinalizationFailure) { auto aktualizr_cycle_thread_status = aktualizr_cycle_thread.wait_for(std::chrono::seconds(20)); ASSERT_EQ(aktualizr_cycle_thread_status, std::future_status::ready); - EXPECT_TRUE(aktualizr.uptane_client()->bootloader->rebootDetected()); + EXPECT_TRUE(aktualizr.uptane_client()->isInstallCompletionRequired()); EXPECT_TRUE(event_hdlr.checkReceivedEvents(expected_event_order)); EXPECT_TRUE(aktualizr.uptane_client()->hasPendingUpdates()); EXPECT_TRUE(http_server_mock->checkReceivedReports(expected_report_order)); - // Aktualizr reports to a server that installation was successfull for the secondary - // checkReceivedReports() verifies whether EcuInstallationApplied was reported for the primary + // Aktualizr reports to a server that installation was successful for the Secondary + // checkReceivedReports() verifies whether EcuInstallationApplied was reported for the Primary EXPECT_FALSE(http_server_mock->wasInstallSuccessful(primary_ecu_id)); EXPECT_TRUE(http_server_mock->wasInstallSuccessful(secondary_ecu_id)); @@ -715,7 +916,7 @@ TEST(Aktualizr, FinalizationFailure) { EXPECT_FALSE(storage->loadDeviceInstallationResult(&dev_installation_res, &report, &correlation_id)); // it's used to return `true` even if there is no any record in DB - // of the uptane cycle just after sending manifest + // of the Uptane cycle just after sending manifest // made it consistent with loadDeviceInstallationResult std::vector> 
ecu_installation_res; EXPECT_FALSE(storage->loadEcuInstallationResults(&ecu_installation_res)); @@ -740,7 +941,7 @@ TEST(Aktualizr, FinalizationFailure) { /* * Initialize -> UptaneCycle -> download updates -> fail installation * - * Verifies whether the uptane client is not at pending state after installation failure + * Verifies whether the Uptane client is not at pending state after installation failure * * Checks actions: * @@ -813,13 +1014,13 @@ TEST(Aktualizr, InstallationFailure) { fiu_disable("fake_package_install"); } - // primary and secondary failure - { + // Primary and Secondary failure + for (std::string prefix : {"secondary_install_", "secondary_sendfirmware_"}) { TemporaryDirectory temp_dir; auto http_server_mock = std::make_shared(temp_dir.Path(), fake_meta_dir); Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http_server_mock->tls_server); auto storage = INvStorage::newStorage(conf.storage); - const std::string sec_fault_name = std::string("secondary_install_") + secondary_ecu_id; + const std::string sec_fault_name = prefix + secondary_ecu_id; fault_injection_init(); fault_injection_enable("fake_package_install", 1, "PRIMFAIL", 0); @@ -877,10 +1078,52 @@ TEST(Aktualizr, InstallationFailure) { } } +/* + * Verifies that updates fail after metadata verification failure reported by Secondaries + */ +TEST(Aktualizr, SecondaryMetaFailure) { + TemporaryDirectory temp_dir; + auto http_server_mock = std::make_shared(temp_dir.Path(), "hasupdates", fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http_server_mock->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + fault_injection_init(); + fiu_enable("secondary_putmetadata", 1, nullptr, 0); + + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http_server_mock); + + struct { + result::Install result; + std::promise promise; + } ev_state; + + auto f_cb = [&ev_state](const std::shared_ptr& event) { + if (event->variant != "AllInstallsComplete") { + 
return; + } + const auto installs_complete = dynamic_cast(event.get()); + ev_state.result = installs_complete->result; + + ev_state.promise.set_value(); + }; + boost::signals2::connection conn = aktualizr.SetSignalHandler(f_cb); + aktualizr.Initialize(); + aktualizr.UptaneCycle(); + + auto status = ev_state.promise.get_future().wait_for(std::chrono::seconds(20)); + if (status != std::future_status::ready) { + FAIL() << "Timed out waiting for installation to complete."; + } + + ASSERT_FALSE(ev_state.result.dev_report.isSuccess()); + + fiu_disable("secondary_putmetadata"); +} + #endif // FIU_ENABLE /* - * Initialize -> UptaneCycle -> updates downloaded and installed for primary + * Initialize -> UptaneCycle -> updates downloaded and installed for Primary * -> reboot emulated -> Initialize -> Finalize Installation After Reboot * * Verifies an auto reboot and pending updates finalization after a reboot @@ -907,9 +1150,10 @@ TEST(Aktualizr, AutoRebootAfterUpdate) { aktualizr.Initialize(); auto aktualizr_cycle_thread = aktualizr.RunForever(); auto aktualizr_cycle_thread_status = aktualizr_cycle_thread.wait_for(std::chrono::seconds(20)); + aktualizr.Shutdown(); EXPECT_EQ(aktualizr_cycle_thread_status, std::future_status::ready); - EXPECT_TRUE(aktualizr.uptane_client()->bootloader->rebootDetected()); + EXPECT_TRUE(aktualizr.uptane_client()->isInstallCompletionRequired()); } { @@ -922,21 +1166,21 @@ TEST(Aktualizr, AutoRebootAfterUpdate) { result::UpdateCheck update_res = aktualizr.CheckUpdates().get(); EXPECT_EQ(update_res.status, result::UpdateStatus::kNoUpdatesAvailable); - // primary is installed, nothing pending + // Primary is installed, nothing pending boost::optional current_target; boost::optional pending_target; storage->loadPrimaryInstalledVersions(¤t_target, &pending_target); EXPECT_TRUE(!!current_target); EXPECT_FALSE(!!pending_target); - EXPECT_EQ(http->manifest_sends, 4); + EXPECT_EQ(http->manifest_sends, 3); } } /* - * Initialize -> UptaneCycle -> updates 
downloaded and installed for secondaries - * without changing the primary. + * Initialize -> UptaneCycle -> updates downloaded and installed for Secondaries + * without changing the Primary. - * Store installation result for secondary + * Store installation result for Secondary */ TEST(Aktualizr, FullMultipleSecondaries) { TemporaryDirectory temp_dir; @@ -945,6 +1189,7 @@ TEST(Aktualizr, FullMultipleSecondaries) { conf.provision.primary_ecu_serial = "testecuserial"; conf.provision.primary_ecu_hardware_id = "testecuhwid"; conf.storage.path = temp_dir.Path(); + conf.pacman.images_path = temp_dir.Path() / "images"; conf.tls.server = http->tls_server; conf.uptane.director_server = http->tls_server + "/director"; conf.uptane.repo_server = http->tls_server + "/repo"; @@ -1008,7 +1253,7 @@ TEST(Aktualizr, FullMultipleSecondaries) { "secondary_firmware2.txt"); EXPECT_EQ(manifest_versions["sec_serial2"]["signed"]["installed_image"]["fileinfo"]["length"].asUInt(), 21); - // Make sure there is no result for the primary by checking the size + // Make sure there is no result for the Primary by checking the size EXPECT_EQ(manifest["installation_report"]["report"]["items"].size(), 2); EXPECT_EQ(manifest["installation_report"]["report"]["items"][0]["ecu"].asString(), "sec_serial1"); EXPECT_TRUE(manifest["installation_report"]["report"]["items"][0]["result"]["success"].asBool()); @@ -1041,7 +1286,7 @@ TEST(Aktualizr, CheckNoUpdates) { LOG_INFO << "Got " << event->variant; switch (ev_state.num_events) { case 0: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 0); EXPECT_EQ(targets_event->result.updates.size(), 0); @@ -1049,7 +1294,7 @@ TEST(Aktualizr, CheckNoUpdates) { break; } case 1: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = 
dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 0); EXPECT_EQ(targets_event->result.updates.size(), 0); @@ -1062,7 +1307,7 @@ TEST(Aktualizr, CheckNoUpdates) { FAIL() << "Unexpected events!"; default: std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; - EXPECT_EQ(event->variant, ""); + ASSERT_EQ(event->variant, ""); } ++ev_state.num_events; }; @@ -1117,14 +1362,14 @@ TEST(Aktualizr, DownloadWithUpdates) { LOG_INFO << "Got " << event->variant; switch (ev_state.num_events) { case 0: { - EXPECT_EQ(event->variant, "AllDownloadsComplete"); + ASSERT_EQ(event->variant, "AllDownloadsComplete"); const auto downloads_complete = dynamic_cast(event.get()); EXPECT_EQ(downloads_complete->result.updates.size(), 0); EXPECT_EQ(downloads_complete->result.status, result::DownloadStatus::kError); break; } case 1: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 2); EXPECT_EQ(targets_event->result.updates.size(), 2u); @@ -1135,7 +1380,7 @@ TEST(Aktualizr, DownloadWithUpdates) { } case 2: case 3: { - EXPECT_EQ(event->variant, "DownloadTargetComplete"); + ASSERT_EQ(event->variant, "DownloadTargetComplete"); const auto download_event = dynamic_cast(event.get()); EXPECT_TRUE(download_event->update.filename() == "primary_firmware.txt" || download_event->update.filename() == "secondary_firmware.txt"); @@ -1143,7 +1388,7 @@ TEST(Aktualizr, DownloadWithUpdates) { break; } case 4: { - EXPECT_EQ(event->variant, "AllDownloadsComplete"); + ASSERT_EQ(event->variant, "AllDownloadsComplete"); const auto downloads_complete = dynamic_cast(event.get()); EXPECT_EQ(downloads_complete->result.updates.size(), 2); EXPECT_TRUE(downloads_complete->result.updates[0].filename() == "primary_firmware.txt" || @@ -1159,7 +1404,7 @@ TEST(Aktualizr, DownloadWithUpdates) { FAIL() << "Unexpected events!"; 
default: std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; - EXPECT_EQ(event->variant, ""); + ASSERT_EQ(event->variant, ""); } ++ev_state.num_events; }; @@ -1170,6 +1415,7 @@ TEST(Aktualizr, DownloadWithUpdates) { result::Download result = aktualizr.Download(std::vector()).get(); EXPECT_EQ(result.updates.size(), 0); EXPECT_EQ(result.status, result::DownloadStatus::kError); + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); aktualizr.Download(update_result.updates); @@ -1342,11 +1588,11 @@ TEST(Aktualizr, TargetAutoremove) { Utils::createDirectories(local_metadir, S_IRWXU); auto http = std::make_shared(temp_dir.Path(), "", local_metadir / "repo"); - UptaneRepo repo{local_metadir, "2021-07-04T16:33:27Z", "id0"}; + UptaneRepo repo{local_metadir, "2025-07-04T16:33:27Z", "id0"}; repo.generateRepo(KeyType::kED25519); const std::string hwid = "primary_hw"; - repo.addImage(fake_meta_dir / "fake_meta/primary_firmware.txt", "primary_firmware.txt", hwid, "", {}); - repo.addTarget("primary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D", ""); + repo.addImage(fake_meta_dir / "fake_meta/primary_firmware.txt", "primary_firmware.txt", hwid); + repo.addTarget("primary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D"); repo.signTargets(); Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); @@ -1374,8 +1620,8 @@ TEST(Aktualizr, TargetAutoremove) { // second install repo.emptyTargets(); - repo.addImage(fake_meta_dir / "fake_meta/dummy_firmware.txt", "dummy_firmware.txt", hwid, "", {}); - repo.addTarget("dummy_firmware.txt", hwid, "CA:FE:A6:D2:84:9D", ""); + repo.addImage(fake_meta_dir / "fake_meta/dummy_firmware.txt", "dummy_firmware.txt", hwid); + repo.addTarget("dummy_firmware.txt", hwid, "CA:FE:A6:D2:84:9D"); repo.signTargets(); { @@ -1394,8 +1640,8 @@ TEST(Aktualizr, TargetAutoremove) { // third install (first firmware again) repo.emptyTargets(); - repo.addImage(fake_meta_dir / "fake_meta/primary_firmware.txt", 
"primary_firmware.txt", hwid, "", {}); - repo.addTarget("primary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D", ""); + repo.addImage(fake_meta_dir / "fake_meta/primary_firmware.txt", "primary_firmware.txt", hwid); + repo.addTarget("primary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D"); repo.signTargets(); { @@ -1414,8 +1660,8 @@ TEST(Aktualizr, TargetAutoremove) { // fourth install (some new third firmware) repo.emptyTargets(); - repo.addImage(fake_meta_dir / "fake_meta/secondary_firmware.txt", "secondary_firmware.txt", hwid, "", {}); - repo.addTarget("secondary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D", ""); + repo.addImage(fake_meta_dir / "fake_meta/secondary_firmware.txt", "secondary_firmware.txt", hwid); + repo.addTarget("secondary_firmware.txt", hwid, "CA:FE:A6:D2:84:9D"); repo.signTargets(); { @@ -1476,13 +1722,13 @@ TEST(Aktualizr, InstallWithUpdates) { LOG_INFO << "Got " << event->variant; switch (ev_state.num_events) { case 0: { - EXPECT_EQ(event->variant, "AllInstallsComplete"); + ASSERT_EQ(event->variant, "AllInstallsComplete"); const auto installs_complete = dynamic_cast(event.get()); EXPECT_EQ(installs_complete->result.ecu_reports.size(), 0); break; } case 1: { - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); const auto targets_event = dynamic_cast(event.get()); EXPECT_EQ(targets_event->result.ecus_count, 2); EXPECT_EQ(targets_event->result.updates.size(), 2u); @@ -1494,7 +1740,7 @@ TEST(Aktualizr, InstallWithUpdates) { } case 2: case 3: { - EXPECT_EQ(event->variant, "DownloadTargetComplete"); + ASSERT_EQ(event->variant, "DownloadTargetComplete"); const auto download_event = dynamic_cast(event.get()); EXPECT_TRUE(download_event->update.filename() == "primary_firmware.txt" || download_event->update.filename() == "secondary_firmware.txt"); @@ -1502,7 +1748,7 @@ TEST(Aktualizr, InstallWithUpdates) { break; } case 4: { - EXPECT_EQ(event->variant, "AllDownloadsComplete"); + ASSERT_EQ(event->variant, 
"AllDownloadsComplete"); const auto downloads_complete = dynamic_cast(event.get()); EXPECT_EQ(downloads_complete->result.updates.size(), 2); EXPECT_TRUE(downloads_complete->result.updates[0].filename() == "primary_firmware.txt" || @@ -1515,35 +1761,35 @@ TEST(Aktualizr, InstallWithUpdates) { case 5: { // Primary always gets installed first. (Not a requirement, just how it // works at present.) - EXPECT_EQ(event->variant, "InstallStarted"); + ASSERT_EQ(event->variant, "InstallStarted"); const auto install_started = dynamic_cast(event.get()); EXPECT_EQ(install_started->serial.ToString(), "CA:FE:A6:D2:84:9D"); break; } case 6: { - // Primary should complete before secondary begins. (Again not a + // Primary should complete before Secondary begins. (Again not a // requirement per se.) - EXPECT_EQ(event->variant, "InstallTargetComplete"); + ASSERT_EQ(event->variant, "InstallTargetComplete"); const auto install_complete = dynamic_cast(event.get()); EXPECT_EQ(install_complete->serial.ToString(), "CA:FE:A6:D2:84:9D"); EXPECT_TRUE(install_complete->success); break; } case 7: { - EXPECT_EQ(event->variant, "InstallStarted"); + ASSERT_EQ(event->variant, "InstallStarted"); const auto install_started = dynamic_cast(event.get()); EXPECT_EQ(install_started->serial.ToString(), "secondary_ecu_serial"); break; } case 8: { - EXPECT_EQ(event->variant, "InstallTargetComplete"); + ASSERT_EQ(event->variant, "InstallTargetComplete"); const auto install_complete = dynamic_cast(event.get()); EXPECT_EQ(install_complete->serial.ToString(), "secondary_ecu_serial"); EXPECT_TRUE(install_complete->success); break; } case 9: { - EXPECT_EQ(event->variant, "AllInstallsComplete"); + ASSERT_EQ(event->variant, "AllInstallsComplete"); const auto installs_complete = dynamic_cast(event.get()); EXPECT_EQ(installs_complete->result.ecu_reports.size(), 2); EXPECT_EQ(installs_complete->result.ecu_reports[0].install_res.result_code.num_code, @@ -1558,7 +1804,7 @@ TEST(Aktualizr, InstallWithUpdates) { FAIL() << 
"Unexpected events!"; default: std::cout << "event #" << ev_state.num_events << " is: " << event->variant << "\n"; - EXPECT_EQ(event->variant, ""); + ASSERT_EQ(event->variant, ""); } ++ev_state.num_events; }; @@ -1571,7 +1817,7 @@ TEST(Aktualizr, InstallWithUpdates) { primary_json["hashes"]["sha512"] = "91814ad1c13ebe2af8d65044893633c4c3ce964edb8cb58b0f357406c255f7be94f42547e108b300346a42cd57662e4757b9d843b7acbc09" "0df0bc05fe55297f"; - primary_json["length"] = 2; + primary_json["length"] = 59; Uptane::Target primary_target("primary_firmware.txt", primary_json); Json::Value secondary_json; @@ -1579,7 +1825,7 @@ TEST(Aktualizr, InstallWithUpdates) { secondary_json["hashes"]["sha512"] = "7dbae4c36a2494b731a9239911d3085d53d3e400886edb4ae2b9b78f40bda446649e83ba2d81653f614cc66f5dd5d4dbd95afba854f148af" "bfae48d0ff4cc38a"; - secondary_json["length"] = 2; + secondary_json["length"] = 15; Uptane::Target secondary_target("secondary_firmware.txt", secondary_json); // First try installing nothing. Nothing should happen. @@ -1593,9 +1839,9 @@ TEST(Aktualizr, InstallWithUpdates) { result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); aktualizr.Download(update_result.updates).get(); - EXPECT_NE(aktualizr.OpenStoredTarget(primary_target).get(), nullptr) + EXPECT_NO_THROW(aktualizr.OpenStoredTarget(primary_target)) << "Primary firmware is not present in storage after the download"; - EXPECT_NE(aktualizr.OpenStoredTarget(secondary_target).get(), nullptr) + EXPECT_NO_THROW(aktualizr.OpenStoredTarget(secondary_target)) << "Secondary firmware is not present in storage after the download"; // After updates have been downloaded, try to install them. 
@@ -1648,7 +1894,7 @@ TEST(Aktualizr, ReportDownloadProgress) { result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); ASSERT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); - // The test mocks are tailored to emulate a device with a primary ECU and one secondary ECU + // The test mocks are tailored to emulate a device with a Primary ECU and one Secondary ECU // for sake of the download progress report testing it's suffice to test it agains just one of the ECUs update_result.updates.pop_back(); @@ -1784,7 +2030,7 @@ class HttpFakeNoCorrelationId : public HttpFake { unsigned int events_seen{0}; }; -/* Correlation ID is empty if none was provided in targets metadata. */ +/* Correlation ID is empty if none was provided in Targets metadata. */ TEST(Aktualizr, FullNoCorrelationId) { TemporaryDirectory temp_dir; TemporaryDirectory meta_dir; @@ -1834,6 +2080,29 @@ TEST(Aktualizr, ManifestCustom) { } } +TEST(Aktualizr, CustomInstallationRawReport) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), "hasupdates", fake_meta_dir); + + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + + aktualizr.Initialize(); + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + result::Download download_result = aktualizr.Download(update_result.updates).get(); + result::Install install_result = aktualizr.Install(download_result.updates).get(); + + auto custom_raw_report = "Installation's custom raw report!"; + EXPECT_TRUE(aktualizr.SetInstallationRawReport(custom_raw_report)); + aktualizr.SendManifest().get(); + EXPECT_EQ(http->last_manifest["signed"]["installation_report"]["report"]["raw_report"], custom_raw_report); + + // After sending manifest, an installation report will be removed from the DB, + // so Aktualzr::SetInstallationRawReport must return a negative 
value. + EXPECT_FALSE(aktualizr.SetInstallationRawReport(custom_raw_report)); +} + class CountUpdateCheckEvents { public: CountUpdateCheckEvents() = default; @@ -1984,12 +2253,12 @@ TEST(Aktualizr, PauseResumeQueue) { &is_paused](std::shared_ptr event) { switch (n_events) { case 0: - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); break; case 1: { std::lock_guard guard(mutex); - EXPECT_EQ(event->variant, "UpdateCheckComplete"); + ASSERT_EQ(event->variant, "UpdateCheckComplete"); // the event shouldn't happen when the system is paused EXPECT_FALSE(is_paused); end_promise.set_value(); @@ -2032,11 +2301,61 @@ TEST(Aktualizr, PauseResumeQueue) { } } +const std::string custom_hwinfo = + R"([{"description":"ECU1","ECU P/N":"AAA","HW P/N":"BBB","HW Version":"1.234", + "SW P/N":"CCC","SW Version":"4.321","ECU Serial":"AAA-BBB-CCC"}, + {"description":"ECU2","ECU P/N":"ZZZ","HW P/N":"XXX","HW Version":"9.876", + "SW P/N":"YYY","SW Version":"6.789","ECU Serial":"VVV-NNN-MMM"}])"; + +class HttpSystemInfo : public HttpFake { + public: + HttpSystemInfo(const boost::filesystem::path& test_dir_in, const boost::filesystem::path& meta_dir_in) + : HttpFake(test_dir_in, "", meta_dir_in) {} + + HttpResponse put(const std::string& url, const Json::Value& data) override { + if (url.find(hwinfo_ep_) == url.length() - hwinfo_ep_.length()) { + if (info_count_ == 0) { // expect lshw data + EXPECT_TRUE(data.isObject()); + EXPECT_TRUE(data.isMember("description")); + } else if (info_count_ == 1) { // expect custom data + auto hwinfo = Utils::parseJSON(custom_hwinfo); + EXPECT_EQ(hwinfo, data); + } + info_count_++; + return HttpResponse("", 200, CURLE_OK, ""); + } else if (url.find("/manifest") != std::string::npos) { + return HttpResponse("", 200, CURLE_OK, ""); + } + return HttpResponse("", 404, CURLE_HTTP_RETURNED_ERROR, "Not found"); + } + + ~HttpSystemInfo() { EXPECT_EQ(info_count_, 2); } + + int info_count_{0}; + std::string 
hwinfo_ep_{"/system_info"}; +}; + +TEST(Aktualizr, CustomHwInfo) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + aktualizr.SendDeviceData().get(); + + auto hwinfo = Utils::parseJSON(custom_hwinfo); + aktualizr.SetCustomHardwareInfo(hwinfo); + aktualizr.SendDeviceData().get(); +} + #ifndef __NO_MAIN__ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); if (argc != 2) { - std::cerr << "Error: " << argv[0] << " requires the path to the base directory of uptane repos.\n"; + std::cerr << "Error: " << argv[0] << " requires the path to the base directory of Uptane repos.\n"; return EXIT_FAILURE; } uptane_repos_dir = argv[1]; diff --git a/src/libaktualizr/primary/custom_url_test.cc b/src/libaktualizr/primary/custom_url_test.cc index 95a2eafe14..355b6ae82a 100644 --- a/src/libaktualizr/primary/custom_url_test.cc +++ b/src/libaktualizr/primary/custom_url_test.cc @@ -3,7 +3,7 @@ #include #include "httpfake.h" -#include "primary/aktualizr.h" +#include "libaktualizr/aktualizr.h" #include "test_utils.h" #include "uptane_test_common.h" @@ -28,10 +28,10 @@ class HttpCheckUrl : public HttpFake { }; /* - * If the URL from the Director is unset, but the URL from the Images repo is + * If the URL from the Director is unset, but the URL from the Image repo is * set, use that. 
*/ -TEST(Aktualizr, ImagesCustomUrl) { +TEST(Aktualizr, ImageCustomUrl) { TemporaryDirectory temp_dir; TemporaryDirectory meta_dir; auto http = std::make_shared(temp_dir.Path(), meta_dir.Path() / "repo"); @@ -59,7 +59,7 @@ TEST(Aktualizr, ImagesCustomUrl) { } /* - * If the URL is set by both the Director and Images repo, use the version from + * If the URL is set by both the Director and Image repo, use the version from * the Director. */ TEST(Aktualizr, BothCustomUrl) { diff --git a/src/libaktualizr/primary/device_cred_prov_test.cc b/src/libaktualizr/primary/device_cred_prov_test.cc index c21cc16418..1e5fba25de 100644 --- a/src/libaktualizr/primary/device_cred_prov_test.cc +++ b/src/libaktualizr/primary/device_cred_prov_test.cc @@ -9,17 +9,37 @@ #include "httpfake.h" #include "logging/logging.h" -#include "primary/initializer.h" +#include "primary/provisioner.h" +#include "primary/provisioner_test_utils.h" #include "primary/sotauptaneclient.h" #include "storage/invstorage.h" #include "uptane/uptanerepository.h" #include "utilities/utils.h" /** - * Verify that when provisioning with device credentials, aktualizr halts if + * Verify that when provisioning with device credentials, aktualizr fails if * credentials are not available. */ -TEST(DeviceCredProv, Failure) { +TEST(DeviceCredProv, DeviceIdFailure) { + RecordProperty("zephyr_key", "OTA-1209,TST-185"); + TemporaryDirectory temp_dir; + Config config; + config.storage.path = temp_dir.Path(); + EXPECT_EQ(config.provision.mode, ProvisionMode::kDeviceCred); + + auto storage = INvStorage::newStorage(config.storage); + auto http = std::make_shared(temp_dir.Path()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); + + // Expect failure when trying to read the certificate to get the device ID. 
+ ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); +} + +/** + * Verify that when provisioning with device credentials, aktualizr fails if + * device ID is provided but credentials are not available. + */ +TEST(DeviceCredProv, TlsFailure) { RecordProperty("zephyr_key", "OTA-1209,TST-185"); TemporaryDirectory temp_dir; Config config; @@ -30,14 +50,14 @@ TEST(DeviceCredProv, Failure) { auto storage = INvStorage::newStorage(config.storage); auto http = std::make_shared(temp_dir.Path()); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + // Expect failure when trying to read the TLS credentials. + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } /** - * Verfiy that aktualizr halts when provided incomplete device provisioning + * Verify that aktualizr halts when provided incomplete device provisioning * credentials. 
*/ TEST(DeviceCredProv, Incomplete) { @@ -53,102 +73,96 @@ TEST(DeviceCredProv, Incomplete) { auto http = std::make_shared(temp_dir.Path()); { - config.import.tls_cacert_path = BasedPath("ca.pem"); - config.import.tls_clientcert_path = BasedPath(""); - config.import.tls_pkey_path = BasedPath(""); + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = utils::BasedPath(""); + config.import.tls_pkey_path = utils::BasedPath(""); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/ca.pem", temp_dir / "import/ca.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } { - config.import.tls_cacert_path = BasedPath(""); - config.import.tls_clientcert_path = BasedPath("client.pem"); - config.import.tls_pkey_path = BasedPath(""); + config.import.tls_cacert_path = utils::BasedPath(""); + config.import.tls_clientcert_path = utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath(""); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/client.pem", temp_dir / "import/client.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, 
{})); } { - config.import.tls_cacert_path = BasedPath(""); - config.import.tls_clientcert_path = BasedPath(""); - config.import.tls_pkey_path = BasedPath("pkey.pem"); + config.import.tls_cacert_path = utils::BasedPath(""); + config.import.tls_clientcert_path = utils::BasedPath(""); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } { - config.import.tls_cacert_path = BasedPath("ca.pem"); - config.import.tls_clientcert_path = BasedPath("client.pem"); - config.import.tls_pkey_path = BasedPath(""); + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath(""); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/ca.pem", temp_dir / "import/ca.pem"); boost::filesystem::copy_file("tests/test_data/device_cred_prov/client.pem", temp_dir / "import/client.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + 
ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } { - config.import.tls_cacert_path = BasedPath("ca.pem"); - config.import.tls_clientcert_path = BasedPath(""); - config.import.tls_pkey_path = BasedPath("pkey.pem"); + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = utils::BasedPath(""); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/ca.pem", temp_dir / "import/ca.pem"); boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } { - config.import.tls_cacert_path = BasedPath(""); - config.import.tls_clientcert_path = BasedPath("client.pem"); - config.import.tls_pkey_path = BasedPath("pkey.pem"); + config.import.tls_cacert_path = utils::BasedPath(""); + config.import.tls_clientcert_path = utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/client.pem", temp_dir / "import/client.pem"); boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = 
std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); + ExpectProvisionError(Provisioner(config.provision, storage, http, keys, {})); } // Do one last round with all three files to make sure it actually works as // expected. - config.import.tls_cacert_path = BasedPath("ca.pem"); - config.import.tls_clientcert_path = BasedPath("client.pem"); - config.import.tls_pkey_path = BasedPath("pkey.pem"); + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); boost::filesystem::remove_all(temp_dir.Path()); Utils::createDirectories(temp_dir / "import", S_IRWXU); boost::filesystem::copy_file("tests/test_data/device_cred_prov/ca.pem", temp_dir / "import/ca.pem"); @@ -156,10 +170,9 @@ TEST(DeviceCredProv, Incomplete) { boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); + ExpectProvisionOK(Provisioner(config.provision, storage, http, keys, {})); } /** @@ -175,18 +188,62 @@ TEST(DeviceCredProv, Success) { boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); config.storage.path = temp_dir.Path(); config.import.base_path = temp_dir / "import"; - config.import.tls_cacert_path = BasedPath("ca.pem"); - config.import.tls_clientcert_path = BasedPath("client.pem"); - config.import.tls_pkey_path = BasedPath("pkey.pem"); + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = 
utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); EXPECT_EQ(config.provision.mode, ProvisionMode::kDeviceCred); auto storage = INvStorage::newStorage(config.storage); storage->importData(config.import); auto http = std::make_shared(temp_dir.Path()); - KeyManager keys(storage, config.keymanagerConfig()); + auto keys = std::make_shared(storage, config.keymanagerConfig()); + + ExpectProvisionOK(Provisioner(config.provision, storage, http, keys, {})); +} + +/** + * Verify that aktualizr can reimport cert and keeps device name. + */ +TEST(DeviceCredProv, ReImportCert) { + RecordProperty("zephyr_key", "OLPSUP-12477"); + TemporaryDirectory temp_dir; + Config config; + Utils::createDirectories(temp_dir / "import", S_IRWXU); + boost::filesystem::copy_file("tests/test_data/device_cred_prov/ca.pem", temp_dir / "import/ca.pem"); + boost::filesystem::copy_file("tests/test_data/device_cred_prov/client.pem", temp_dir / "import/client.pem"); + boost::filesystem::copy_file("tests/test_data/device_cred_prov/pkey.pem", temp_dir / "import/pkey.pem"); + /*use any cert with non empty CN*/ + boost::filesystem::copy_file("tests/test_data/prov/client.pem", temp_dir / "import/newcert.pem"); + config.storage.path = temp_dir.Path(); + config.import.base_path = temp_dir / "import"; + config.import.tls_cacert_path = utils::BasedPath("ca.pem"); + config.import.tls_clientcert_path = utils::BasedPath("client.pem"); + config.import.tls_pkey_path = utils::BasedPath("pkey.pem"); + EXPECT_EQ(config.provision.mode, ProvisionMode::kDeviceCred); + config.provision.device_id = "AnYsTrInG"; + auto http = std::make_shared(temp_dir.Path()); - Initializer initializer(config.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); + { + /* prepare storage initialized with device_id from config where cert CN and device id are different */ + auto storage = INvStorage::newStorage(config.storage); + storage->importData(config.import); + auto keys = 
std::make_shared(storage, config.keymanagerConfig()); + ExpectProvisionOK(Provisioner(config.provision, storage, http, keys, {})); + std::string device_id; + EXPECT_TRUE(storage->loadDeviceId(&device_id)); + EXPECT_EQ(device_id, "AnYsTrInG"); + } + + { + config.import.tls_clientcert_path = utils::BasedPath("newcert.pem"); + auto storage = INvStorage::newStorage(config.storage); + EXPECT_NO_THROW(storage->importData(config.import)); + auto keys = std::make_shared(storage, config.keymanagerConfig()); + ExpectProvisionOK(Provisioner(config.provision, storage, http, keys, {})); + std::string device_id; + EXPECT_TRUE(storage->loadDeviceId(&device_id)); + EXPECT_EQ(device_id, "AnYsTrInG"); + } } #ifndef __NO_MAIN__ diff --git a/src/libaktualizr/primary/download_nonostree_test.cc b/src/libaktualizr/primary/download_nonostree_test.cc index 9743cf9885..9518c99c41 100644 --- a/src/libaktualizr/primary/download_nonostree_test.cc +++ b/src/libaktualizr/primary/download_nonostree_test.cc @@ -2,10 +2,10 @@ #include -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "package_manager/ostreemanager.h" -#include "primary/aktualizr.h" #include "storage/sqlstorage.h" #include "test_utils.h" #include "uptane_test_common.h" @@ -20,7 +20,7 @@ static boost::filesystem::path sysroot; TEST(Aktualizr, DownloadNonOstreeBin) { TemporaryDirectory temp_dir; Config conf = UptaneTestCommon::makeTestConfig(temp_dir, server); - conf.pacman.type = PackageManager::kOstree; + conf.pacman.type = PACKAGE_MANAGER_OSTREE; conf.pacman.sysroot = sysroot.string(); conf.pacman.ostree_server = treehub_server; conf.pacman.os = "dummy-os"; @@ -30,11 +30,9 @@ TEST(Aktualizr, DownloadNonOstreeBin) { { std::shared_ptr storage = INvStorage::newStorage(conf.storage); - auto uptane_client = SotaUptaneClient::newDefaultClient(conf, storage); + auto uptane_client = std_::make_unique(conf, storage); uptane_client->initialize(); - 
EXPECT_FALSE(uptane_client->uptaneIteration(nullptr, nullptr)); - EXPECT_STREQ(uptane_client->getLastException().what(), - "The target had a non-OSTree package that can not be installed on an OSTree system."); + EXPECT_THROW(uptane_client->uptaneIteration(nullptr, nullptr), Uptane::InvalidTarget); } } @@ -46,7 +44,7 @@ int main(int argc, char **argv) { if (argc != 3) { std::cerr << "Error: " << argv[0] << " requires the path to the uptane-generator utility " - << "and an OStree sysroot\n"; + << "and an OSTree sysroot\n"; return EXIT_FAILURE; } diff --git a/src/libaktualizr/primary/empty_targets_test.cc b/src/libaktualizr/primary/empty_targets_test.cc index 8704f17a9d..251b0db6e4 100644 --- a/src/libaktualizr/primary/empty_targets_test.cc +++ b/src/libaktualizr/primary/empty_targets_test.cc @@ -3,7 +3,7 @@ #include #include "httpfake.h" -#include "primary/aktualizr.h" +#include "libaktualizr/aktualizr.h" #include "test_utils.h" #include "uptane_test_common.h" #include "utilities/fault_injection.h" @@ -30,7 +30,7 @@ class HttpRejectEmptyCorrId : public HttpFake { /* * Verify that we can successfully install an update after receiving - * subsequent targets metadata that is empty. + * subsequent Targets metadata that is empty. */ TEST(Aktualizr, EmptyTargets) { TemporaryDirectory temp_dir; @@ -66,7 +66,7 @@ TEST(Aktualizr, EmptyTargets) { EXPECT_EQ(update_result2.status, result::UpdateStatus::kUpdatesAvailable); result::Install install_result = aktualizr.Install(update_result2.updates).get(); - EXPECT_EQ(install_result.ecu_reports.size(), 1); + ASSERT_EQ(install_result.ecu_reports.size(), 1); EXPECT_EQ(install_result.ecu_reports[0].install_res.result_code.num_code, data::ResultCode::Numeric::kNeedCompletion); @@ -92,6 +92,56 @@ TEST(Aktualizr, EmptyTargets) { } } +/* Check that Aktualizr switches back to empty targets after failing to verify + * the Target matching. Also check that no updates are reported if any are + * invalid. 
*/ +TEST(Aktualizr, EmptyTargetsAfterVerification) { + TemporaryDirectory temp_dir; + TemporaryDirectory meta_dir; + auto http = std::make_shared(temp_dir.Path(), meta_dir.Path() / "repo"); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + logger_set_threshold(boost::log::trivial::trace); + + // Add two images: a valid one for the Primary and an invalid for the + // Secondary. The Primary will get verified first and should succeed. + Process uptane_gen(uptane_generator_path.string()); + uptane_gen.run({"generate", "--path", meta_dir.PathString(), "--correlationid", "abc123"}); + uptane_gen.run({"image", "--path", meta_dir.PathString(), "--filename", "tests/test_data/firmware.txt", + "--targetname", "firmware.txt", "--hwid", "primary_hw"}); + uptane_gen.run({"addtarget", "--path", meta_dir.PathString(), "--targetname", "firmware.txt", "--hwid", "primary_hw", + "--serial", "CA:FE:A6:D2:84:9D"}); + uptane_gen.run({"image", "--path", meta_dir.PathString(), "--filename", "tests/test_data/firmware_name.txt", + "--targetname", "firmware_name.txt", "--hwid", "bad"}); + uptane_gen.run({"addtarget", "--path", meta_dir.PathString(), "--targetname", "firmware_name.txt", "--hwid", + "secondary_hw", "--serial", "secondary_ecu_serial"}); + uptane_gen.run({"signtargets", "--path", meta_dir.PathString(), "--correlationid", "abc123"}); + + // failing verification + auto storage = INvStorage::newStorage(conf.storage); + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kError); + EXPECT_EQ(update_result.ecus_count, 0); + EXPECT_TRUE(update_result.updates.empty()); + } + + // Backend reacts to failure: no need to install the target anymore + uptane_gen.run({"emptytargets", "--path", meta_dir.PathString()}); + uptane_gen.run({"signtargets", "--path", meta_dir.PathString(), 
"--correlationid", "abc123"}); + + // check that no update is available + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + } +} + #ifdef FIU_ENABLE /* Check that Aktualizr switches back to empty targets after failing a @@ -193,7 +243,7 @@ TEST(Aktualizr, EmptyTargetsAfterInstall) { } } -#endif +#endif // FIU_ENABLE #ifndef __NO_MAIN__ int main(int argc, char **argv) { diff --git a/src/libaktualizr/primary/initializer.cc b/src/libaktualizr/primary/initializer.cc deleted file mode 100644 index 8cf4c81594..0000000000 --- a/src/libaktualizr/primary/initializer.cc +++ /dev/null @@ -1,261 +0,0 @@ -#include "initializer.h" - -#include - -#include -#include - -#include "bootstrap/bootstrap.h" -#include "crypto/keymanager.h" -#include "logging/logging.h" - -// Postcondition: device_id is in the storage -bool Initializer::initDeviceId() { - // if device_id is already stored, just return - std::string device_id; - if (storage_->loadDeviceId(&device_id)) { - return true; - } - - // if device_id is specified in config, just use it, otherwise generate a random one - device_id = config_.device_id; - if (device_id.empty()) { - if (config_.mode == ProvisionMode::kSharedCred) { - device_id = Utils::genPrettyName(); - } else if (config_.mode == ProvisionMode::kDeviceCred) { - device_id = keys_.getCN(); - } else { - LOG_ERROR << "Unknown provisioning method"; - return false; - } - } - - storage_->storeDeviceId(device_id); - return true; -} -void Initializer::resetDeviceId() { storage_->clearDeviceId(); } - -// Postcondition [(serial, hw_id)] is in the storage -bool Initializer::initEcuSerials() { - EcuSerials ecu_serials; - - // TODO: the assumption now is that the set of connected ECUs doesn't change, but it might obviously - // not be the case. 
ECU discovery seems to be a big story and should be worked on accordingly. - if (storage_->loadEcuSerials(&ecu_serials)) { - return true; - } - - std::string primary_ecu_serial_local = config_.primary_ecu_serial; - if (primary_ecu_serial_local.empty()) { - primary_ecu_serial_local = keys_.UptanePublicKey().KeyId(); - } - - std::string primary_ecu_hardware_id = config_.primary_ecu_hardware_id; - if (primary_ecu_hardware_id.empty()) { - primary_ecu_hardware_id = Utils::getHostname(); - if (primary_ecu_hardware_id == "") { - return false; - } - } - - ecu_serials.emplace_back(Uptane::EcuSerial(primary_ecu_serial_local), - Uptane::HardwareIdentifier(primary_ecu_hardware_id)); - - for (auto it = secondary_info_.begin(); it != secondary_info_.end(); ++it) { - ecu_serials.emplace_back(it->second->getSerial(), it->second->getHwId()); - } - - storage_->storeEcuSerials(ecu_serials); - return true; -} - -void Initializer::resetEcuSerials() { storage_->clearEcuSerials(); } - -// Postcondition: (public, private) is in the storage. It should not be stored until secondaries are provisioned -bool Initializer::initPrimaryEcuKeys() { return keys_.generateUptaneKeyPair().size() != 0u; } - -void Initializer::resetEcuKeys() { storage_->clearPrimaryKeys(); } - -bool Initializer::loadSetTlsCreds() { - keys_.copyCertsToCurl(*http_client_); - return keys_.isOk(); -} - -// Postcondition: TLS credentials are in the storage -InitRetCode Initializer::initTlsCreds() { - if (loadSetTlsCreds()) { - return InitRetCode::kOk; - } - - if (config_.mode != ProvisionMode::kSharedCred) { - LOG_ERROR << "Credentials not found"; - return InitRetCode::kStorageFailure; - } - - // Shared credential provision is required and possible => (automatically) - // provision with shared credentials. 
- - // set bootstrap credentials - Bootstrap boot(config_.provision_path, config_.p12_password); - http_client_->setCerts(boot.getCa(), CryptoSource::kFile, boot.getCert(), CryptoSource::kFile, boot.getPkey(), - CryptoSource::kFile); - - Json::Value data; - std::string device_id; - if (!storage_->loadDeviceId(&device_id)) { - LOG_ERROR << "Unknown device_id during shared credential provisioning."; - return InitRetCode::kStorageFailure; - } - data["deviceId"] = device_id; - data["ttl"] = config_.expiry_days; - HttpResponse response = http_client_->post(config_.server + "/devices", data); - if (!response.isOk()) { - Json::Value resp_code = response.getJson()["code"]; - if (resp_code.isString() && resp_code.asString() == "device_already_registered") { - LOG_ERROR << "Device id" << device_id << "is occupied"; - return InitRetCode::kOccupied; - } - LOG_ERROR << "Shared credential provisioning failed, response: " << response.body; - return InitRetCode::kServerFailure; - } - - std::string pkey; - std::string cert; - std::string ca; - StructGuard device_p12(BIO_new_mem_buf(response.body.c_str(), static_cast(response.body.size())), - BIO_vfree); - if (!Crypto::parseP12(device_p12.get(), "", &pkey, &cert, &ca)) { - LOG_ERROR << "Received a malformed P12 package from the server"; - return InitRetCode::kBadP12; - } - storage_->storeTlsCreds(ca, cert, pkey); - - // set provisioned credentials - if (!loadSetTlsCreds()) { - LOG_ERROR << "Failed to set provisioned credentials"; - return InitRetCode::kStorageFailure; - } - - LOG_INFO << "Provisioned successfully on Device Gateway"; - return InitRetCode::kOk; -} - -void Initializer::resetTlsCreds() { - if (config_.mode != ProvisionMode::kDeviceCred) { - storage_->clearTlsCreds(); - } -} - -// Postcondition: "ECUs registered" flag set in the storage -InitRetCode Initializer::initEcuRegister() { - if (storage_->loadEcuRegistered()) { - return InitRetCode::kOk; - } - - PublicKey uptane_public_key = keys_.UptanePublicKey(); - - if 
(uptane_public_key.Type() == KeyType::kUnknown) { - return InitRetCode::kStorageFailure; - } - - EcuSerials ecu_serials; - // initEcuSerials should have been called by this point - if (!storage_->loadEcuSerials(&ecu_serials) || ecu_serials.size() < 1) { - return InitRetCode::kStorageFailure; - } - - Json::Value all_ecus; - all_ecus["primary_ecu_serial"] = ecu_serials[0].first.ToString(); - all_ecus["ecus"] = Json::arrayValue; - { - Json::Value primary_ecu; - primary_ecu["hardware_identifier"] = ecu_serials[0].second.ToString(); - primary_ecu["ecu_serial"] = ecu_serials[0].first.ToString(); - primary_ecu["clientKey"] = keys_.UptanePublicKey().ToUptane(); - all_ecus["ecus"].append(primary_ecu); - } - - for (auto it = secondary_info_.cbegin(); it != secondary_info_.cend(); it++) { - Json::Value ecu; - auto public_key = it->second->getPublicKey(); - ecu["hardware_identifier"] = it->second->getHwId().ToString(); - ecu["ecu_serial"] = it->second->getSerial().ToString(); - ecu["clientKey"] = public_key.ToUptane(); - all_ecus["ecus"].append(ecu); - } - - HttpResponse response = http_client_->post(config_.ecu_registration_endpoint, all_ecus); - if (!response.isOk()) { - Json::Value resp_code = response.getJson()["code"]; - if (resp_code.isString() && - (resp_code.asString() == "ecu_already_registered" || resp_code.asString() == "device_already_registered")) { - LOG_ERROR << "One or more ECUs are unexpectedly already registered."; - return InitRetCode::kOccupied; - } - LOG_ERROR << "Error registering device on Uptane, response: " << response.body; - return InitRetCode::kServerFailure; - } - // do not call storage_->storeEcuRegistered(), it will be called from the top-level Init function after the - // acknowledgement - LOG_INFO << "ECUs have been successfully registered to the server."; - return InitRetCode::kOk; -} - -// Postcondition: "ECUs registered" flag set in the storage -Initializer::Initializer( - const ProvisionConfig& config_in, std::shared_ptr storage_in, - 
std::shared_ptr http_client_in, KeyManager& keys_in, - const std::map >& secondary_info_in) - : config_(config_in), - storage_(std::move(storage_in)), - http_client_(std::move(http_client_in)), - keys_(keys_in), - secondary_info_(secondary_info_in) { - success_ = false; - for (int i = 0; i < MaxInitializationAttempts; i++) { - if (!initDeviceId()) { - LOG_ERROR << "Device ID generation failed. Aborting initialization."; - return; - } - - InitRetCode ret_code = initTlsCreds(); - // if a device with the same ID has already been registered to the server, - // generate a new one - if (ret_code == InitRetCode::kOccupied) { - resetDeviceId(); - LOG_INFO << "Device name is already registered. Restarting."; - continue; - } else if (ret_code == InitRetCode::kStorageFailure) { - LOG_ERROR << "Error reading existing provisioning data from storage."; - return; - } else if (ret_code != InitRetCode::kOk) { - LOG_ERROR << "Shared credential provisioning failed. Aborting initialization."; - return; - } - - if (!initPrimaryEcuKeys()) { - LOG_ERROR << "ECU key generation failed. Aborting initialization."; - return; - } - if (!initEcuSerials()) { - LOG_ERROR << "ECU serial generation failed. Aborting initialization."; - return; - } - - ret_code = initEcuRegister(); - // if ECUs with same ID have been registered to the server, we don't have a - // clear remediation path right now, just ignore the error - if (ret_code == InitRetCode::kOccupied) { - LOG_INFO << "ECU serial is already registered."; - } else if (ret_code != InitRetCode::kOk) { - LOG_ERROR << "ECU registration failed. 
Aborting initialization."; - return; - } - - // TODO: acknowledge on server _before_ setting the flag - storage_->storeEcuRegistered(); - success_ = true; - return; - } - LOG_ERROR << "Initialization failed after " << MaxInitializationAttempts << " attempts."; -} diff --git a/src/libaktualizr/primary/initializer.h b/src/libaktualizr/primary/initializer.h deleted file mode 100644 index ae5d1f3df9..0000000000 --- a/src/libaktualizr/primary/initializer.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef INITIALIZER_H_ -#define INITIALIZER_H_ - -#include "config/config.h" -#include "crypto/keymanager.h" -#include "http/httpinterface.h" -#include "uptane/secondaryinterface.h" - -const int MaxInitializationAttempts = 3; - -enum class InitRetCode { kOk, kOccupied, kServerFailure, kStorageFailure, kSecondaryFailure, kBadP12, kPkcs11Failure }; - -class Initializer { - public: - Initializer(const ProvisionConfig& config_in, std::shared_ptr storage_in, - std::shared_ptr http_client_in, KeyManager& keys_in, - const std::map >& secondary_info_in); - bool isSuccessful() const { return success_; } - - private: - const ProvisionConfig& config_; - std::shared_ptr storage_; - std::shared_ptr http_client_; - KeyManager& keys_; - const std::map >& secondary_info_; - bool success_; - - bool initDeviceId(); - void resetDeviceId(); - bool initEcuSerials(); - void resetEcuSerials(); - bool initPrimaryEcuKeys(); - bool initSecondaryEcuKeys(); - void resetEcuKeys(); - InitRetCode initTlsCreds(); - void resetTlsCreds(); - InitRetCode initEcuRegister(); - bool loadSetTlsCreds(); // TODO -> metadownloader -}; - -#endif // INITIALIZER_H_ diff --git a/src/libaktualizr/primary/metadata_expiration_test.cc b/src/libaktualizr/primary/metadata_expiration_test.cc new file mode 100644 index 0000000000..f9d6f31771 --- /dev/null +++ b/src/libaktualizr/primary/metadata_expiration_test.cc @@ -0,0 +1,232 @@ +#include + +#include + +#include "httpfake.h" +#include "libaktualizr/aktualizr.h" +#include "test_utils.h" 
+#include "uptane_test_common.h" + +boost::filesystem::path uptane_generator_path; + +class MetadataExpirationTest : public ::testing::Test { + protected: + MetadataExpirationTest() : uptane_gen_(uptane_generator_path.string()) { + Process uptane_gen(uptane_generator_path.string()); + uptane_gen.run({"generate", "--path", meta_dir_.PathString()}); + + http_ = std::make_shared(temp_dir_.Path(), "", meta_dir_.Path() / "repo"); + conf_ = UptaneTestCommon::makeTestConfig(temp_dir_, http_->tls_server); + conf_.pacman.fake_need_reboot = true; + conf_.uptane.force_install_completion = true; + conf_.bootloader.reboot_sentinel_dir = temp_dir_.Path(); + + logger_set_threshold(boost::log::trivial::trace); + + storage_ = INvStorage::newStorage(conf_.storage); + aktualizr_ = std::make_shared(conf_, storage_, http_); + client_ = aktualizr_->uptane_client(); + } + + void addImage() { + uptane_gen_.run({"image", "--path", meta_dir_.PathString(), "--filename", "tests/test_data/firmware.txt", + "--targetname", target_filename_, "--hwid", "primary_hw"}); + + target_image_hash_ = boost::algorithm::to_lower_copy( + boost::algorithm::hex(Crypto::sha256digest(Utils::readFile("tests/test_data/firmware.txt")))); + } + + void addTarget(const std::string& target_filename, int expiration_delta = 0) { + if (expiration_delta != 0) { + time_t new_expiration_time; + std::time(&new_expiration_time); + new_expiration_time += expiration_delta; + struct tm new_expiration_time_str {}; + gmtime_r(&new_expiration_time, &new_expiration_time_str); + + uptane_gen_.run({"addtarget", "--path", meta_dir_.PathString(), "--targetname", target_filename, "--hwid", + "primary_hw", "--serial", "CA:FE:A6:D2:84:9D", "--expires", + TimeStamp(new_expiration_time_str).ToString()}); + + } else { + uptane_gen_.run({"addtarget", "--path", meta_dir_.PathString(), "--targetname", target_filename, "--hwid", + "primary_hw", "--serial", "CA:FE:A6:D2:84:9D"}); + } + } + + void addTargetAndSign(const std::string& target_filename, 
int expiration_delta = 0) { + addTarget(target_filename, expiration_delta); + uptane_gen_.run({"signtargets", "--path", meta_dir_.PathString()}); + } + + void refreshTargetsMetadata() { + // refresh the Targets metadata in the repo/Director + uptane_gen_.run({"refresh", "--path", meta_dir_.PathString(), "--repotype", "director", "--keyname", "targets"}); + } + + void addTargetToInstall(int expiration_delta = 0) { + addImage(); + addTargetAndSign(target_filename_, expiration_delta); + } + + void simulateReboot() { + client_.reset(); + aktualizr_->Shutdown(); + aktualizr_ = std::make_shared(conf_, storage_, http_); + aktualizr_->Initialize(); + client_ = aktualizr_->uptane_client(); + } + + protected: + Process uptane_gen_; + const std::string target_filename_ = "firmware.txt"; + std::string target_image_hash_; + + TemporaryDirectory meta_dir_; + TemporaryDirectory temp_dir_; + std::shared_ptr storage_; + Config conf_; + std::shared_ptr http_; + std::shared_ptr aktualizr_; + std::shared_ptr client_; +}; + +TEST_F(MetadataExpirationTest, MetadataExpirationBeforeInstallation) { + aktualizr_->Initialize(); + result::UpdateCheck update_result = aktualizr_->CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + + addTargetToInstall(-1); + + // run the uptane cycle and try to install the target + aktualizr_->UptaneCycle(); + + ASSERT_FALSE(client_->hasPendingUpdates()); + ASSERT_FALSE(client_->isInstallCompletionRequired()); + + { + auto currently_installed_target = client_->getCurrent(); + EXPECT_NE(target_image_hash_, currently_installed_target.sha256Hash()); + EXPECT_NE(target_filename_, currently_installed_target.filename()); + } + refreshTargetsMetadata(); + + // run the uptane cycle and try to install the target + aktualizr_->UptaneCycle(); + + // check if the target has been installed and pending to be applied after a reboot + ASSERT_TRUE(client_->hasPendingUpdates()); + 
ASSERT_TRUE(client_->isInstallCompletionRequired()); + + // force reboot + client_->completeInstall(); + + // emulate aktualizr fresh start + simulateReboot(); + + aktualizr_->UptaneCycle(); + + ASSERT_FALSE(client_->hasPendingUpdates()); + ASSERT_FALSE(client_->isInstallCompletionRequired()); + { + auto currently_installed_target = client_->getCurrent(); + EXPECT_EQ(target_image_hash_, currently_installed_target.sha256Hash()); + EXPECT_EQ(target_filename_, currently_installed_target.filename()); + } +} + +TEST_F(MetadataExpirationTest, MetadataExpirationAfterInstallationAndBeforeReboot) { + aktualizr_->Initialize(); + + result::UpdateCheck update_result = aktualizr_->CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + + const int expiration_in_sec = 5; + addTargetToInstall(expiration_in_sec); + auto target_init_time = std::chrono::system_clock::now(); + + // run the uptane cycle to install the target + aktualizr_->UptaneCycle(); + + // check if the target has been installed and pending to be applied after a reboot + ASSERT_TRUE(client_->hasPendingUpdates()); + ASSERT_TRUE(client_->isInstallCompletionRequired()); + + // emulate the target metadata expiration while the uptane cycle is running + std::this_thread::sleep_for(std::chrono::seconds(expiration_in_sec) - + (std::chrono::system_clock::now() - target_init_time)); + aktualizr_->UptaneCycle(); + + // since the installation happened before the metadata expiration we expect that + // the update is still pending and will be applied after a reboot + ASSERT_TRUE(client_->hasPendingUpdates()); + ASSERT_TRUE(client_->isInstallCompletionRequired()); + + // force reboot + client_->completeInstall(); + + // emulate aktualizr fresh start + simulateReboot(); + + aktualizr_->UptaneCycle(); + + // check if the pending target has been applied. 
it should be applied even if its metadata are expired + // as long as it was installed at the moment when they were not expired + { + auto currently_installed_target = client_->getCurrent(); + EXPECT_EQ(target_image_hash_, currently_installed_target.sha256Hash()); + EXPECT_EQ(target_filename_, currently_installed_target.filename()); + } +} + +TEST_F(MetadataExpirationTest, MetadataExpirationAfterInstallationAndBeforeApplication) { + aktualizr_->Initialize(); + + result::UpdateCheck update_result = aktualizr_->CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + + const int expiration_in_sec = 5; + addTargetToInstall(expiration_in_sec); + auto target_init_time = std::chrono::system_clock::now(); + + // run the uptane cycle to install the target + aktualizr_->UptaneCycle(); + + // check if the target has been installed and pending to be applied after a reboot + ASSERT_TRUE(client_->hasPendingUpdates()); + ASSERT_TRUE(client_->isInstallCompletionRequired()); + + // wait until the target metadata are expired + // emulate the target metadata expiration while the Uptane cycle is running + std::this_thread::sleep_for(std::chrono::seconds(expiration_in_sec) - + (std::chrono::system_clock::now() - target_init_time)); + + // force reboot + client_->completeInstall(); + + // emulate aktualizr fresh start + simulateReboot(); + aktualizr_->UptaneCycle(); + + // check if the pending target has been applied. 
it should be applied even if its metadata are expired + // as long as it was installed at the moment when they were not expired + auto currently_installed_target = client_->getCurrent(); + EXPECT_EQ(target_image_hash_, currently_installed_target.sha256Hash()); + EXPECT_EQ(target_filename_, currently_installed_target.filename()); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + if (argc != 2) { + std::cerr << "Error: " << argv[0] << " requires the path to the uptane-generator utility\n"; + return EXIT_FAILURE; + } + uptane_generator_path = argv[1]; + + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + + return RUN_ALL_TESTS(); +} + +// vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/libaktualizr/primary/metadata_fetch_test.cc b/src/libaktualizr/primary/metadata_fetch_test.cc new file mode 100644 index 0000000000..913a168c9a --- /dev/null +++ b/src/libaktualizr/primary/metadata_fetch_test.cc @@ -0,0 +1,154 @@ +#include + +#include + +#include "httpfake.h" +#include "libaktualizr/aktualizr.h" +#include "test_utils.h" +#include "uptane_repo.h" +#include "uptane_test_common.h" + +class HttpFakeMetaCounter : public HttpFake { + public: + HttpFakeMetaCounter(const boost::filesystem::path &test_dir_in, const boost::filesystem::path &meta_dir_in) + : HttpFake(test_dir_in, "", meta_dir_in) {} + + HttpResponse get(const std::string &url, int64_t maxsize) override { + if (url.find("director/1.root.json") != std::string::npos) { + ++director_1root_count; + } + if (url.find("director/2.root.json") != std::string::npos) { + ++director_2root_count; + } + if (url.find("director/targets.json") != std::string::npos) { + ++director_targets_count; + } + if (url.find("repo/1.root.json") != std::string::npos) { + ++image_1root_count; + } + if (url.find("repo/2.root.json") != std::string::npos) { + ++image_2root_count; + } + if (url.find("repo/timestamp.json") != std::string::npos) { + ++image_timestamp_count; + } + if 
(url.find("repo/snapshot.json") != std::string::npos) { + ++image_snapshot_count; + } + if (url.find("repo/targets.json") != std::string::npos) { + ++image_targets_count; + } + + return HttpFake::get(url, maxsize); + } + + int director_1root_count{0}; + int director_2root_count{0}; + int director_targets_count{0}; + int image_1root_count{0}; + int image_2root_count{0}; + int image_timestamp_count{0}; + int image_snapshot_count{0}; + int image_targets_count{0}; +}; + +/* + * Don't download Image repo metadata if Director reports no new targets. Don't + * download Snapshot and Targets metadata from the Image repo if the Timestamp + * indicates nothing has changed. + */ +TEST(Aktualizr, MetadataFetch) { + TemporaryDirectory temp_dir; + TemporaryDirectory meta_dir; + auto http = std::make_shared(temp_dir.Path(), meta_dir.Path() / "repo"); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + logger_set_threshold(boost::log::trivial::trace); + + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + // No updates scheduled: only download Director Root and Targets metadata. + UptaneRepo uptane_repo_{meta_dir.PathString(), "", ""}; + uptane_repo_.generateRepo(KeyType::kED25519); + + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kNoUpdatesAvailable); + EXPECT_EQ(http->director_1root_count, 1); + EXPECT_EQ(http->director_2root_count, 1); + EXPECT_EQ(http->director_targets_count, 1); + EXPECT_EQ(http->image_1root_count, 0); + EXPECT_EQ(http->image_2root_count, 0); + EXPECT_EQ(http->image_timestamp_count, 0); + EXPECT_EQ(http->image_snapshot_count, 0); + EXPECT_EQ(http->image_targets_count, 0); + + // Two images added, but only one update scheduled: all metadata objects + // should be fetched once. 
+ uptane_repo_.addImage("tests/test_data/firmware.txt", "firmware.txt", "primary_hw"); + uptane_repo_.addImage("tests/test_data/firmware_name.txt", "firmware_name.txt", "primary_hw"); + uptane_repo_.addTarget("firmware.txt", "primary_hw", "CA:FE:A6:D2:84:9D"); + uptane_repo_.addDelegation(Uptane::Role("role-abc", true), Uptane::Role("targets", false), "abc/*", false, + KeyType::kED25519); + uptane_repo_.signTargets(); + + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + EXPECT_EQ(http->director_1root_count, 1); + EXPECT_EQ(http->director_2root_count, 2); + EXPECT_EQ(http->director_targets_count, 2); + EXPECT_EQ(http->image_1root_count, 1); + EXPECT_EQ(http->image_2root_count, 1); + EXPECT_EQ(http->image_timestamp_count, 1); + EXPECT_EQ(http->image_snapshot_count, 1); + EXPECT_EQ(http->image_targets_count, 1); + + // Update scheduled with pre-existing image: no need to refetch Image repo + // Snapshot or Targets metadata. + uptane_repo_.emptyTargets(); + uptane_repo_.addTarget("firmware_name.txt", "primary_hw", "CA:FE:A6:D2:84:9D"); + uptane_repo_.signTargets(); + + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + EXPECT_EQ(http->director_1root_count, 1); + EXPECT_EQ(http->director_2root_count, 3); + EXPECT_EQ(http->director_targets_count, 3); + EXPECT_EQ(http->image_1root_count, 1); + EXPECT_EQ(http->image_2root_count, 2); + EXPECT_EQ(http->image_timestamp_count, 2); + EXPECT_EQ(http->image_snapshot_count, 1); + EXPECT_EQ(http->image_targets_count, 1); + + // Delegation added to an existing delegation; update scheduled with + // pre-existing image: Snapshot must be refetched, but Targets are unchanged. 
+ uptane_repo_.emptyTargets(); + uptane_repo_.addTarget("firmware.txt", "primary_hw", "CA:FE:A6:D2:84:9D"); + uptane_repo_.addDelegation(Uptane::Role("role-def", true), Uptane::Role("role-abc", true), "def/*", false, + KeyType::kED25519); + uptane_repo_.signTargets(); + + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + EXPECT_EQ(http->director_1root_count, 1); + EXPECT_EQ(http->director_2root_count, 4); + EXPECT_EQ(http->director_targets_count, 4); + EXPECT_EQ(http->image_1root_count, 1); + EXPECT_EQ(http->image_2root_count, 3); + EXPECT_EQ(http->image_timestamp_count, 3); + EXPECT_EQ(http->image_snapshot_count, 2); + EXPECT_EQ(http->image_targets_count, 1); +} + +#ifndef __NO_MAIN__ +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + + return RUN_ALL_TESTS(); +} +#endif + +// vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/libaktualizr/primary/provisioner.cc b/src/libaktualizr/primary/provisioner.cc new file mode 100644 index 0000000000..cc1a9b3bc2 --- /dev/null +++ b/src/libaktualizr/primary/provisioner.cc @@ -0,0 +1,393 @@ +#include "provisioner.h" + +#include + +#include +#include + +#include "bootstrap/bootstrap.h" +#include "crypto/crypto.h" +#include "crypto/keymanager.h" +#include "logging/logging.h" + +using std::map; +using std::move; +using std::shared_ptr; + +Provisioner::Provisioner(const ProvisionConfig& config, shared_ptr storage, + shared_ptr http_client, shared_ptr key_manager, + const map>& secondaries) + : config_(config), + storage_(move(storage)), + http_client_(move(http_client)), + key_manager_(move(key_manager)), + secondaries_(secondaries) {} + +void Provisioner::SecondariesWereChanged() { current_state_ = State::kUnknown; } + +void Provisioner::Prepare() { + initEcuSerials(); + initSecondaryInfo(); +} + +bool Provisioner::Attempt() { + try { + Prepare(); 
+ try { + initTlsCreds(); + } catch (const ServerOccupied& e) { + // if a device with the same ID has already been registered to the server, + // generate a new one + storage_->clearDeviceId(); + device_id_.clear(); + LOG_ERROR << "Device name is already registered. Retrying."; + throw; + } + + initEcuRegister(); + + initEcuReportCounter(); + + current_state_ = State::kOk; + return true; + } catch (const Provisioner::Error& ex) { + last_error_ = ex.what(); + current_state_ = State::kTemporaryError; + return false; + } catch (const std::exception& ex) { + LOG_DEBUG << "Provisioner::Attempt() caught an exception not deriving from Provisioner::Error"; + last_error_ = ex.what(); + current_state_ = State::kTemporaryError; + return false; + } +} + +bool Provisioner::ShouldAttemptAgain() const { + return current_state_ == State::kUnknown || current_state_ == State::kTemporaryError; +} + +Uptane::EcuSerial Provisioner::PrimaryEcuSerial() { + if (primary_ecu_serial_ != Uptane::EcuSerial::Unknown()) { + return primary_ecu_serial_; + } + + std::string key_pair; + try { + // If the key pair already exists, this loads it from storage. 
+ key_pair = key_manager_->generateUptaneKeyPair(); + } catch (const std::exception& e) { + throw KeyGenerationError(e.what()); + } + + if (key_pair.empty()) { + throw KeyGenerationError("Unknown error"); + } + + std::string primary_ecu_serial_str = config_.primary_ecu_serial; + if (primary_ecu_serial_str.empty()) { + primary_ecu_serial_str = key_manager_->UptanePublicKey().KeyId(); + } + primary_ecu_serial_ = Uptane::EcuSerial(primary_ecu_serial_str); + + // assert that the new serial is sane + if (primary_ecu_serial_ == Uptane::EcuSerial::Unknown()) { + throw std::logic_error("primary_ecu_serial_ is still Unknown"); + } + + return primary_ecu_serial_; +} + +Uptane::HardwareIdentifier Provisioner::PrimaryHardwareIdentifier() { + if (primary_ecu_hardware_id_ != Uptane::HardwareIdentifier::Unknown()) { + return primary_ecu_hardware_id_; + } + std::string primary_ecu_hardware_id_str = config_.primary_ecu_hardware_id; + if (primary_ecu_hardware_id_str.empty()) { + primary_ecu_hardware_id_str = Utils::getHostname(); + if (primary_ecu_hardware_id_str.empty()) { + throw Error("Could not get current host name, please configure an hardware ID explicitly"); + } + } + + primary_ecu_hardware_id_ = Uptane::HardwareIdentifier(primary_ecu_hardware_id_str); + + // Assert the new value is sane + if (primary_ecu_hardware_id_ == Uptane::HardwareIdentifier::Unknown()) { + throw std::logic_error("primary_ecu_hardware_id_ is still Unknown"); + } + + return primary_ecu_hardware_id_; +} + +std::string Provisioner::DeviceId() { + if (device_id_.empty()) { + // Try loading it + storage_->loadDeviceId(&device_id_); + } + + if (!device_id_.empty()) { + return device_id_; + } + + LOG_WARNING << "No device ID yet..."; + // If device_id is specified in the config, use that. + device_id_ = config_.device_id; + if (device_id_.empty()) { + LOG_WARNING << "device_id is empty... generating"; + // Otherwise, try to read the device certificate if it is available. 
+ try { + device_id_ = key_manager_->getCN(); + } catch (const std::exception& e) { + // No certificate: for device credential provisioning, abort. For shared + // credential provisioning, generate a random name. + if (config_.mode == ProvisionMode::kSharedCred || config_.mode == ProvisionMode::kSharedCredReuse) { + device_id_ = Utils::genPrettyName(); + } else if (config_.mode == ProvisionMode::kDeviceCred) { + throw e; + } else { + throw Error("Unknown provisioning method"); + } + } + } + if (!device_id_.empty()) { + storage_->storeDeviceId(device_id_); + } + return device_id_; +} + +bool Provisioner::loadSetTlsCreds() { + key_manager_->copyCertsToCurl(*http_client_); + return key_manager_->isOk(); +} + +// Postcondition: +// - TLS credentials are in the storage +// - This device_id is provisioned on the device gateway +void Provisioner::initTlsCreds() { + if (loadSetTlsCreds()) { + return; + } + + if (config_.mode == ProvisionMode::kDeviceCred) { + throw StorageError("Device credentials expected but not found"); + } + + // Shared credential provisioning is required and possible => (automatically) + // provision with shared credentials. + + // Set bootstrap (shared) credentials. 
+ Bootstrap boot(config_.provision_path, config_.p12_password); + http_client_->setCerts(boot.getCa(), CryptoSource::kFile, boot.getCert(), CryptoSource::kFile, boot.getPkey(), + CryptoSource::kFile); + + Json::Value data; + data["deviceId"] = DeviceId(); + data["ttl"] = config_.expiry_days; + HttpResponse response = http_client_->post(config_.server + "/devices", data); + if (!response.isOk()) { + Json::Value resp_code; + try { + resp_code = response.getJson()["code"]; + } catch (const std::exception& ex) { + LOG_ERROR << "Unable to parse response code from device registration: " << ex.what(); + throw ServerError(ex.what()); + } + if (resp_code.isString() && resp_code.asString() == "device_already_registered") { + LOG_ERROR << "Device ID " << DeviceId() << " is already registered."; + throw ServerOccupied(); + } + const auto err = std::string("Shared credential provisioning failed: ") + + std::to_string(response.http_status_code) + " " + response.body; + throw ServerError(err); + } + + std::string pkey; + std::string cert; + std::string ca; + StructGuard device_p12(BIO_new_mem_buf(response.body.c_str(), static_cast(response.body.size())), + BIO_vfree); + if (!Crypto::parseP12(device_p12.get(), "", &pkey, &cert, &ca)) { + throw ServerError("Received malformed device credentials from the server"); + } + storage_->storeTlsCreds(ca, cert, pkey); + + // Set provisioned (device) credentials. + if (!loadSetTlsCreds()) { + throw Error("Failed to configure HTTP client with device credentials."); + } + + if (config_.mode != ProvisionMode::kSharedCredReuse) { + // Remove shared provisioning credentials from the archive; we have no more + // use for them. + Utils::removeFileFromArchive(config_.provision_path, "autoprov_credentials.p12"); + // Remove the treehub.json if it's still there. It shouldn't have been put on + // the device, but it has happened before. + try { + Utils::removeFileFromArchive(config_.provision_path, "treehub.json"); + } catch (...) 
{ + } + } + + LOG_INFO << "Provisioned successfully on Device Gateway."; +} + +// Postcondition [(serial, hw_id)] is in the storage +void Provisioner::initEcuSerials() { + EcuSerials stored_ecu_serials; + storage_->loadEcuSerials(&stored_ecu_serials); + + new_ecu_serials_.clear(); + new_ecu_serials_.emplace_back(PrimaryEcuSerial(), PrimaryHardwareIdentifier()); + for (const auto& s : secondaries_) { + new_ecu_serials_.emplace_back(s.first, s.second->getHwId()); + } + + register_ecus_ = stored_ecu_serials.empty(); + if (!register_ecus_) { + // We should probably clear the misconfigured_ecus table once we have + // consent working. + std::vector found(stored_ecu_serials.size(), false); + + EcuCompare primary_comp(new_ecu_serials_[0]); + EcuSerials::const_iterator store_it; + store_it = std::find_if(stored_ecu_serials.cbegin(), stored_ecu_serials.cend(), primary_comp); + if (store_it == stored_ecu_serials.cend()) { + LOG_INFO << "Configured Primary ECU serial " << new_ecu_serials_[0].first << " with hardware ID " + << new_ecu_serials_[0].second << " not found in storage."; + register_ecus_ = true; + } else { + found[static_cast(store_it - stored_ecu_serials.cbegin())] = true; + } + + // Check all configured Secondaries to see if any are new. + for (auto it = secondaries_.cbegin(); it != secondaries_.cend(); ++it) { + EcuCompare secondary_comp(std::make_pair(it->second->getSerial(), it->second->getHwId())); + store_it = std::find_if(stored_ecu_serials.cbegin(), stored_ecu_serials.cend(), secondary_comp); + if (store_it == stored_ecu_serials.cend()) { + LOG_INFO << "Configured Secondary ECU serial " << it->second->getSerial() << " with hardware ID " + << it->second->getHwId() << " not found in storage."; + register_ecus_ = true; + } else { + found[static_cast(store_it - stored_ecu_serials.cbegin())] = true; + } + } + + // Check all stored Secondaries not already matched to see if any have been + // removed. Store them in a separate table to keep track of them. 
+ std::vector::iterator found_it; + for (found_it = found.begin(); found_it != found.end(); ++found_it) { + if (!*found_it) { + auto not_registered = stored_ecu_serials[static_cast(found_it - found.begin())]; + LOG_INFO << "ECU serial " << not_registered.first << " with hardware ID " << not_registered.second + << " in storage was not found in Secondary configuration."; + register_ecus_ = true; + storage_->saveMisconfiguredEcu({not_registered.first, not_registered.second, EcuState::kOld}); + } + } + } +} + +void Provisioner::initSecondaryInfo() { + sec_info_.clear(); + for (const auto& s : secondaries_) { + const Uptane::EcuSerial serial = s.first; + SecondaryInterface& sec = *s.second; + + SecondaryInfo info; + // If upgrading from the older version of the storage without the + // secondary_ecus table, we need to migrate the data. This should be done + // regardless of whether we need to (re-)register the ECUs. + // The ECU serials should be already initialized by this point. + if (!storage_->loadSecondaryInfo(serial, &info) || info.type.empty() || info.pub_key.Type() == KeyType::kUnknown) { + info.serial = serial; + info.hw_id = sec.getHwId(); + info.type = sec.Type(); + const PublicKey& p = sec.getPublicKey(); + if (p.Type() != KeyType::kUnknown) { + info.pub_key = p; + } + // If we don't need to register the ECUs, we still need to store this info + // to complete the migration. + if (!register_ecus_) { + storage_->saveSecondaryInfo(info.serial, info.type, info.pub_key); + } + } + // We will need this info later if the device is not yet provisioned + sec_info_.push_back(std::move(info)); + } +} + +// Postcondition: "ECUs registered" flag set in the storage +void Provisioner::initEcuRegister() { + // Allow re-registration if the ECUs have changed. 
+ if (!register_ecus_) { + LOG_DEBUG << "All ECUs are already registered with the server."; + return; + } + + PublicKey uptane_public_key = key_manager_->UptanePublicKey(); + + if (uptane_public_key.Type() == KeyType::kUnknown) { + throw StorageError("Invalid key in storage"); + } + + Json::Value all_ecus; + all_ecus["primary_ecu_serial"] = new_ecu_serials_[0].first.ToString(); + all_ecus["ecus"] = Json::arrayValue; + { + Json::Value primary_ecu; + primary_ecu["hardware_identifier"] = new_ecu_serials_[0].second.ToString(); + primary_ecu["ecu_serial"] = new_ecu_serials_[0].first.ToString(); + primary_ecu["clientKey"] = key_manager_->UptanePublicKey().ToUptane(); + all_ecus["ecus"].append(primary_ecu); + } + + for (const auto& info : sec_info_) { + Json::Value ecu; + ecu["hardware_identifier"] = info.hw_id.ToString(); + ecu["ecu_serial"] = info.serial.ToString(); + ecu["clientKey"] = info.pub_key.ToUptane(); + all_ecus["ecus"].append(ecu); + } + + HttpResponse response = http_client_->post(config_.ecu_registration_endpoint, all_ecus); + if (!response.isOk()) { + Json::Value resp_code = response.getJson()["code"]; + if (resp_code.isString() && + (resp_code.asString() == "ecu_already_registered" || resp_code.asString() == "device_already_registered")) { + throw ServerError("One or more ECUs are unexpectedly already registered"); + } + const auto err = + std::string("Error registering device: ") + std::to_string(response.http_status_code) + " " + response.body; + throw ServerError(err); + } + + // Only store the changes if we successfully registered the ECUs. + storage_->storeEcuSerials(new_ecu_serials_); + for (const auto& info : sec_info_) { + storage_->saveSecondaryInfo(info.serial, info.type, info.pub_key); + } + // Create a DeviceId if it hasn't been done already. This is necessary + // because storeDeviceId() resets the is_registered flag and storeEcuRegistered() + // requires there to be a DeviceID in the device_info table already. 
+ DeviceId(); + storage_->storeEcuRegistered(); + + LOG_INFO << "ECUs have been successfully registered with the server."; +} + +void Provisioner::initEcuReportCounter() { + std::vector> ecu_cnt; + + if (storage_->loadEcuReportCounter(&ecu_cnt)) { + return; + } + + EcuSerials ecu_serials; + + if (!storage_->loadEcuSerials(&ecu_serials) || ecu_serials.empty()) { + throw Error("Could not load ECU serials"); + } + + storage_->saveEcuReportCounter(Uptane::EcuSerial(ecu_serials[0].first.ToString()), 0); +} diff --git a/src/libaktualizr/primary/provisioner.h b/src/libaktualizr/primary/provisioner.h new file mode 100644 index 0000000000..a10d9d98eb --- /dev/null +++ b/src/libaktualizr/primary/provisioner.h @@ -0,0 +1,190 @@ +#ifndef INITIALIZER_H_ +#define INITIALIZER_H_ + +#include + +#include "libaktualizr/secondaryinterface.h" + +#include "crypto/keymanager.h" +#include "http/httpinterface.h" +#include "libaktualizr/config.h" +#include "storage/invstorage.h" +#include "uptane/tuf.h" + +class Provisioner { + public: + enum class State { + kUnknown = 0, + kOk, + kTemporaryError, + // Note there is no 'Permanent' error here, because all the failure modes we have so far may recover + }; + + /** + * Provisioner gets the local system, represented by config, storage, key_manager and secondaries properly registered + * on the server (represented by http_client). The constructor doesn't do any work. Calling bool Attempt() will make + * one provisioning attempt (if necessary) and return true if provisioning is done. + */ + Provisioner(const ProvisionConfig& config, std::shared_ptr storage, + std::shared_ptr http_client, std::shared_ptr key_manager, + const std::map >& secondaries); + + /** + * Notify Provisioner that the secondaries passed in via the constructor have + * changed. + * This will revert the provisioning state, so that Attempt() will cause + * provisioning to be attempted again. 
+ */ + void SecondariesWereChanged(); + + /** + * Perform as much of provisioning as is possible without contacting a + * remote server. Secondaries are still contacted over the local networking. + * This is safe to call redundantly. + */ + void Prepare(); + + /** + * Make one attempt at provisioning, if the provisioning hasn't already completed. + * If provisioning is already successful this is a no-op. + * use like: + * if (!provisioner_.Attempt()) { + * return error; + * } + * // Provisioned. Carry on as normal + * @returns whether the device is provisioned + */ + bool Attempt(); + + State CurrentState() const { return current_state_; } + + /** + * A textual description of the last cause for provisioning to fail. + */ + std::string LastError() const { return last_error_; }; + + /** + * Is is CurrentState() either kUnknown or kTemporaryError? + * To keep trying until provisioning succeeds or the retry count is hit, do: + * while(provisioner.ShouldAttemptAgain()) { provisioner.MakeAttempt(); } + * @return + */ + bool ShouldAttemptAgain() const; + + /** + * Get the ECU Serial for the Primary, lazily creating and storing it if necessary + */ + Uptane::EcuSerial PrimaryEcuSerial(); + + /** + * Get the Hardware Identifier for the Primary, lazily creating and storing it if necessary + */ + Uptane::HardwareIdentifier PrimaryHardwareIdentifier(); + + /** + * Get the Device ID for this vehicle, lazily creating and storing it if necessary. + * One Device ID covers a set of ECUs. 
+ * @return The Device ID + */ + std::string DeviceId(); + + private: + class Error : public std::runtime_error { + public: + explicit Error(const std::string& what) : std::runtime_error(std::string("Initializer error: ") + what) {} + }; + + class KeyGenerationError : public Error { + public: + explicit KeyGenerationError(const std::string& what) + : Error(std::string("Could not generate Uptane key pair: ") + what) {} + }; + + class StorageError : public Error { + public: + explicit StorageError(const std::string& what) : Error(std::string("Storage error: ") + what) {} + }; + + class ServerError : public Error { + public: + explicit ServerError(const std::string& what) : Error(std::string("Server error: ") + what) {} + }; + + class ServerOccupied : public Error { + public: + ServerOccupied() : Error("device ID is already registered") {} + }; + + class EcuCompare { + public: + explicit EcuCompare(std::pair ecu_in) + : serial(std::move(ecu_in.first)), hardware_id(std::move(ecu_in.second)) {} + bool operator()(const std::pair& in) const { + return (in.first == serial && in.second == hardware_id); + } + + private: + const Uptane::EcuSerial serial; + const Uptane::HardwareIdentifier hardware_id; + }; + + /** + * Requires an Uptane key pair + * Failure modes: + * - Can't contact secondaries + */ + void initEcuSerials(); + + /** + * Failure modes: + * - Can't contact secondaries + */ + void initSecondaryInfo(); + + /** + * Failure modes: + * - Can't contact server / offline + */ + void initTlsCreds(); + + /** + * Update http_client_ with the TLS certs from key_manager_ + * @return Whether the keys were available and loaded + */ + bool loadSetTlsCreds(); + + /** + * Registers the ECUs with the server. 
+ * Stores ECU information locally + * + * Failure modes: + * - Can't contact server / offline + */ + void initEcuRegister(); + + /** + * Initializes the 'ecu_report_counter' table to zero + * Requires the ECU serials are setup + * Requires the ECUs are registered on the server + */ + void initEcuReportCounter(); + + const ProvisionConfig& config_; + std::shared_ptr storage_; + std::shared_ptr http_client_; + std::shared_ptr key_manager_; + // Lazily initialized by DeviceId() + std::string device_id_; + // Lazily initialized by PrimaryEcuSerial() + Uptane::EcuSerial primary_ecu_serial_{Uptane::EcuSerial::Unknown()}; + // Lazily initialized by PrimaryHardwareIdentifier() + Uptane::HardwareIdentifier primary_ecu_hardware_id_{Uptane::HardwareIdentifier::Unknown()}; + const std::map& secondaries_; + std::vector sec_info_; + EcuSerials new_ecu_serials_; + bool register_ecus_{false}; + State current_state_{State::kUnknown}; + std::string last_error_; +}; + +#endif // INITIALIZER_H_ diff --git a/src/libaktualizr/primary/provisioner_test.cc b/src/libaktualizr/primary/provisioner_test.cc new file mode 100644 index 0000000000..f083441639 --- /dev/null +++ b/src/libaktualizr/primary/provisioner_test.cc @@ -0,0 +1,469 @@ +#include + +#include + +#include + +#include "httpfake.h" +#include "primary/provisioner.h" +#include "primary/provisioner_test_utils.h" +#include "primary/sotauptaneclient.h" +#include "storage/invstorage.h" +#include "utilities/utils.h" + +using std::string; +using Uptane::EcuSerial; +using Uptane::HardwareIdentifier; + +/* + * Check that aktualizr creates provisioning data if they don't exist already. 
+ */ +TEST(Provisioner, Success) { + RecordProperty("zephyr_key", "OTA-983,TST-153"); + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf("tests/config/basic.toml"); + conf.uptane.director_server = http->tls_server + "/director"; + conf.uptane.repo_server = http->tls_server + "/repo"; + conf.tls.server = http->tls_server; + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + // First make sure nothing is already there. + auto storage = INvStorage::newStorage(conf.storage); + std::string pkey; + std::string cert; + std::string ca; + EXPECT_FALSE(storage->loadTlsCreds(&ca, &cert, &pkey)); + std::string public_key; + std::string private_key; + EXPECT_FALSE(storage->loadPrimaryKeys(&public_key, &private_key)); + + // Initialize. + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + // Then verify that the storage contains what we expect. + EXPECT_TRUE(storage->loadTlsCreds(&ca, &cert, &pkey)); + EXPECT_NE(ca, ""); + EXPECT_NE(cert, ""); + EXPECT_NE(pkey, ""); + EXPECT_TRUE(storage->loadPrimaryKeys(&public_key, &private_key)); + EXPECT_NE(public_key, ""); + EXPECT_NE(private_key, ""); + + const Json::Value ecu_data = Utils::parseJSONFile(temp_dir.Path() / "post.json"); + EXPECT_EQ(ecu_data["ecus"].size(), 1); + EXPECT_EQ(ecu_data["ecus"][0]["clientKey"]["keyval"]["public"].asString(), public_key); + EXPECT_EQ(ecu_data["ecus"][0]["ecu_serial"].asString(), conf.provision.primary_ecu_serial); + EXPECT_NE(ecu_data["ecus"][0]["hardware_identifier"].asString(), ""); + EXPECT_EQ(ecu_data["primary_ecu_serial"].asString(), conf.provision.primary_ecu_serial); +} + +/* + * Check that aktualizr does NOT change provisioning data if they DO exist + * already. 
+ */ +TEST(Provisioner, InitializeTwice) { + RecordProperty("zephyr_key", "OTA-983,TST-154"); + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf("tests/config/basic.toml"); + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + // First make sure nothing is already there. + auto storage = INvStorage::newStorage(conf.storage); + std::string pkey1; + std::string cert1; + std::string ca1; + EXPECT_FALSE(storage->loadTlsCreds(&ca1, &cert1, &pkey1)); + std::string public_key1; + std::string private_key1; + EXPECT_FALSE(storage->loadPrimaryKeys(&public_key1, &private_key1)); + + // Initialize and verify that the storage contains what we expect. + { + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + EXPECT_TRUE(storage->loadTlsCreds(&ca1, &cert1, &pkey1)); + EXPECT_NE(ca1, ""); + EXPECT_NE(cert1, ""); + EXPECT_NE(pkey1, ""); + EXPECT_TRUE(storage->loadPrimaryKeys(&public_key1, &private_key1)); + EXPECT_NE(public_key1, ""); + EXPECT_NE(private_key1, ""); + } + + // Initialize again and verify that nothing has changed. + { + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + std::string pkey2; + std::string cert2; + std::string ca2; + EXPECT_TRUE(storage->loadTlsCreds(&ca2, &cert2, &pkey2)); + std::string public_key2; + std::string private_key2; + EXPECT_TRUE(storage->loadPrimaryKeys(&public_key2, &private_key2)); + + EXPECT_EQ(cert1, cert2); + EXPECT_EQ(ca1, ca2); + EXPECT_EQ(pkey1, pkey2); + EXPECT_EQ(public_key1, public_key2); + EXPECT_EQ(private_key1, private_key2); + } +} + +/** + * Check that aktualizr does not generate a pet name when device ID is + * specified. 
+ */ +TEST(Provisioner, PetNameConfiguration) { + RecordProperty("zephyr_key", "OTA-985,TST-146"); + TemporaryDirectory temp_dir; + const std::string test_name = "test-name-123"; + + /* Make sure provided device ID is read as expected. */ + Config conf("tests/config/device_id.toml"); + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + { + EXPECT_EQ(conf.provision.device_id, test_name); + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name); + } + + { + /* Make sure name is unchanged after re-initializing config. */ + conf.postUpdateValues(); + EXPECT_EQ(conf.provision.device_id, test_name); + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name); + } +} + +/** + * Check that aktualizr does not generate a pet name when device ID is + * already present in the device certificate's common name. This is the expected + * behavior required to support replacing a Primary ECU. 
+ */ +TEST(Provisioner, PetNameDeviceCert) { + std::string test_name; + Config conf("tests/config/basic.toml"); + + { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + conf.storage.path = temp_dir.Path(); + auto storage = INvStorage::newStorage(conf.storage); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + storage->storeTlsCert(Utils::readFile("tests/test_data/prov/client.pem")); + test_name = keys->getCN(); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name); + } + + { + /* Make sure name is unchanged after re-initializing with the same device + * certificate but otherwise a completely fresh storage. */ + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + conf.storage.path = temp_dir.Path(); + auto storage = INvStorage::newStorage(conf.storage); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + storage->storeTlsCert(Utils::readFile("tests/test_data/prov/client.pem")); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name); + } +} + +/** + * Check that aktualizr generates a pet name if no device ID is specified. + */ +TEST(Provisioner, PetNameCreation) { + RecordProperty("zephyr_key", "OTA-985,TST-145"); + TemporaryDirectory temp_dir; + + // Make sure name is created. 
+ Config conf("tests/config/basic.toml"); + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir.Path() / "cred.zip"); + conf.provision.provision_path = temp_dir.Path() / "cred.zip"; + + std::string test_name1, test_name2; + { + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + EXPECT_TRUE(storage->loadDeviceId(&test_name1)); + EXPECT_NE(test_name1, ""); + } + + // Make sure a new name is generated if using a new database and the config + // does not specify a device ID. + TemporaryDirectory temp_dir2; + { + conf.storage.path = temp_dir2.Path(); + boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir2.Path() / "cred.zip"); + conf.provision.device_id = ""; + + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir2.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + EXPECT_TRUE(storage->loadDeviceId(&test_name2)); + EXPECT_NE(test_name2, test_name1); + } + + // If the device_id is cleared in the config, but still present in the + // storage, re-initializing the config should read the device_id from storage. 
+ { + conf.provision.device_id = ""; + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir2.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name2); + } + + // If the device_id is removed from storage, but the field is still present in + // the config, re-initializing the config should still read the device_id from + // config. + { + TemporaryDirectory temp_dir3; + conf.storage.path = temp_dir3.Path(); + boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir3.Path() / "cred.zip"); + conf.provision.device_id = test_name2; + + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir3.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + std::string devid; + EXPECT_TRUE(storage->loadDeviceId(&devid)); + EXPECT_EQ(devid, test_name2); + } +} + +enum class InitRetCode { kOk, kOccupied, kServerFailure, kStorageFailure, kSecondaryFailure, kBadP12, kPkcs11Failure }; + +class HttpFakeDeviceRegistration : public HttpFake { + public: + explicit HttpFakeDeviceRegistration(const boost::filesystem::path& test_dir_in) : HttpFake(test_dir_in) {} + + HttpResponse post(const std::string& url, const Json::Value& data) override { + if (url.find("/devices") != std::string::npos) { + if (retcode == InitRetCode::kOk) { + return HttpResponse(Utils::readFile("tests/test_data/cred.p12"), 200, CURLE_OK, ""); + } else if (retcode == InitRetCode::kOccupied) { + Json::Value response; + response["code"] = "device_already_registered"; + return HttpResponse(Utils::jsonToStr(response), 400, CURLE_OK, ""); + } else { + return HttpResponse("", 400, CURLE_OK, ""); + } + } + return HttpFake::post(url, data); + } + + InitRetCode 
retcode{InitRetCode::kOk}; +}; + +/* Detect and recover from failed device provisioning. */ +TEST(Provisioner, DeviceRegistration) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf("tests/config/basic.toml"); + conf.uptane.director_server = http->tls_server + "/director"; + conf.uptane.repo_server = http->tls_server + "/repo"; + conf.tls.server = http->tls_server; + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + auto storage = INvStorage::newStorage(conf.storage); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + + // Force a failure from the fake server due to device already registered. + { + http->retcode = InitRetCode::kOccupied; + ExpectProvisionError(Provisioner(conf.provision, storage, http, keys, {}), "already registered"); + } + + // Force an arbitrary failure from the fake server. + { + http->retcode = InitRetCode::kServerFailure; + ExpectProvisionError(Provisioner(conf.provision, storage, http, keys, {}), "Server error"); + } + + // Don't force a failure and make sure it actually works this time. 
+ { + http->retcode = InitRetCode::kOk; + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + } +} + +class HttpFakeEcuRegistration : public HttpFake { + public: + explicit HttpFakeEcuRegistration(const boost::filesystem::path& test_dir_in) : HttpFake(test_dir_in) {} + + HttpResponse post(const std::string& url, const Json::Value& data) override { + if (url.find("/director/ecus") != std::string::npos) { + if (retcode == InitRetCode::kOk) { + return HttpResponse("", 200, CURLE_OK, ""); + } else if (retcode == InitRetCode::kOccupied) { + Json::Value response; + response["code"] = "ecu_already_registered"; + return HttpResponse(Utils::jsonToStr(response), 400, CURLE_OK, ""); + } else { + return HttpResponse("", 400, CURLE_OK, ""); + } + } + return HttpFake::post(url, data); + } + + InitRetCode retcode{InitRetCode::kOk}; +}; + +/* Detect and recover from failed ECU registration. */ +TEST(Provisioner, EcuRegisteration) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf("tests/config/basic.toml"); + conf.uptane.director_server = http->tls_server + "/director"; + conf.uptane.repo_server = http->tls_server + "/repo"; + conf.tls.server = http->tls_server; + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + auto storage = INvStorage::newStorage(conf.storage); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + + // Force a failure from the fake server due to ECUs already registered. + { + http->retcode = InitRetCode::kOccupied; + ExpectProvisionError(Provisioner(conf.provision, storage, http, keys, {}), + "ECUs are unexpectedly already registered"); + } + + // Force an arbitary failure from the fake server. + { + http->retcode = InitRetCode::kServerFailure; + ExpectProvisionError(Provisioner(conf.provision, storage, http, keys, {}), "Server error"); + } + + // Don't force a failure and make sure it actually works this time. 
+ { + http->retcode = InitRetCode::kOk; + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + } +} + +/* Use the system hostname as hardware ID if one is not provided. */ +TEST(Provisioner, HostnameAsHardwareID) { + TemporaryDirectory temp_dir; + Config conf("tests/config/basic.toml"); + conf.storage.path = temp_dir.Path(); + + boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir.Path() / "cred.zip"); + conf.provision.provision_path = temp_dir.Path() / "cred.zip"; + + { + auto storage = INvStorage::newStorage(conf.storage); + auto http = std::make_shared(temp_dir.Path()); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + + EXPECT_TRUE(conf.provision.primary_ecu_hardware_id.empty()); + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + EcuSerials ecu_serials; + EXPECT_TRUE(storage->loadEcuSerials(&ecu_serials)); + EXPECT_GE(ecu_serials.size(), 1); + + auto primaryHardwareID = ecu_serials[0].second; + auto hostname = Utils::getHostname(); + EXPECT_EQ(primaryHardwareID, HardwareIdentifier(hostname)); + } +} + +TEST(Provisioner, StableEcuSerial) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf("tests/config/basic.toml"); + conf.uptane.director_server = http->tls_server + "/director"; + conf.uptane.repo_server = http->tls_server + "/repo"; + conf.tls.server = http->tls_server; + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = "testecuserial"; + + auto storage = INvStorage::newStorage(conf.storage); + auto keys = std::make_shared(storage, conf.keymanagerConfig()); + + EcuSerial orig_serial = EcuSerial::Unknown(); + HardwareIdentifier orig_hwid = HardwareIdentifier::Unknown(); + // Initial attempt is offline + { + http->retcode = InitRetCode::kServerFailure; + Provisioner dut(conf.provision, storage, http, keys, {}); + dut.Attempt(); + + orig_hwid = dut.PrimaryHardwareIdentifier(); + EXPECT_NE(orig_hwid, 
HardwareIdentifier::Unknown()); + orig_serial = dut.PrimaryEcuSerial(); + EXPECT_NE(orig_serial, EcuSerial::Unknown()); + } + + // Try again (on the next boot) + { + Provisioner dut(conf.provision, storage, http, keys, {}); + dut.Attempt(); + + // The serial number of the primary ECU should be unchanged + EXPECT_EQ(orig_serial, dut.PrimaryEcuSerial()); + } + + // Don't force a failure and make sure it actually works this time. + { + http->retcode = InitRetCode::kOk; + ExpectProvisionOK(Provisioner(conf.provision, storage, http, keys, {})); + + EcuSerials final_serials; + storage->loadEcuSerials(&final_serials); + ASSERT_EQ(final_serials.size(), 1); + EXPECT_EQ(final_serials[0].first, orig_serial); + EXPECT_EQ(final_serials[0].second, orig_hwid); + EXPECT_TRUE(storage->loadEcuRegistered()); + } +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/libaktualizr/primary/provisioner_test_utils.cc b/src/libaktualizr/primary/provisioner_test_utils.cc new file mode 100644 index 0000000000..ba2269afb4 --- /dev/null +++ b/src/libaktualizr/primary/provisioner_test_utils.cc @@ -0,0 +1,31 @@ +#include "provisioner_test_utils.h" + +#include + +void ExpectProvisionOK(Provisioner&& provisioner) { + int attempts = 0; + bool last_attempt = false; + while (provisioner.ShouldAttemptAgain()) { + EXPECT_FALSE(last_attempt) << "Provisioner::Attempt() should return false iff ShouldAttemptAgain()"; + last_attempt = provisioner.Attempt(); + // Avoid infinite loops if the ShouldAttemptAgain doesn't work right + attempts++; + ASSERT_LE(attempts, 100) << "Far too many Provisioning attempts!"; + } + EXPECT_TRUE(last_attempt) << "Provisioner::Attempt() should return false iff ShouldAttemptAgain()"; + EXPECT_EQ(provisioner.CurrentState(), Provisioner::State::kOk); +} + +void ExpectProvisionError(Provisioner&& provisioner, 
const std::string& match) { + bool last_attempt; + for (int attempt = 0; attempt < 3; attempt++) { + EXPECT_TRUE(provisioner.ShouldAttemptAgain()); + last_attempt = provisioner.Attempt(); + ASSERT_FALSE(last_attempt) << "Expecting provisioning to fail with error " << match; + } + EXPECT_TRUE(provisioner.ShouldAttemptAgain()) + << "Provisioner::Attempt() should return false iff ShouldAttemptAgain()"; + auto err_message = provisioner.LastError(); + auto matches = err_message.find(match); + EXPECT_NE(matches, std::string::npos) << "Error message didn't contain " << match << " actual:" << err_message; +} diff --git a/src/libaktualizr/primary/provisioner_test_utils.h b/src/libaktualizr/primary/provisioner_test_utils.h new file mode 100644 index 0000000000..9c76bd86aa --- /dev/null +++ b/src/libaktualizr/primary/provisioner_test_utils.h @@ -0,0 +1,11 @@ +#ifndef AKTUALIZR_PROVISIONER_TEST_UTILS_H +#define AKTUALIZR_PROVISIONER_TEST_UTILS_H + +#include "primary/provisioner.h" + +// Test utility to run provisioning to completion and check the result +void ExpectProvisionOK(Provisioner&& provisioner); + +void ExpectProvisionError(Provisioner&& provisioner, const std::string& match = ""); + +#endif // AKTUALIZR_PROVISIONER_TEST_UTILS_H diff --git a/src/libaktualizr/primary/reportqueue.cc b/src/libaktualizr/primary/reportqueue.cc index c0328fc7e2..ab904a1b3e 100644 --- a/src/libaktualizr/primary/reportqueue.cc +++ b/src/libaktualizr/primary/reportqueue.cc @@ -2,10 +2,23 @@ #include -#include "config/config.h" - -ReportQueue::ReportQueue(const Config& config_in, std::shared_ptr http_client) - : config(config_in), http(std::move(http_client)) { +#include "http/httpclient.h" +#include "libaktualizr/config.h" +#include "logging/logging.h" +#include "storage/invstorage.h" +#include "storage/sql_utils.h" + +ReportQueue::ReportQueue(const Config& config_in, std::shared_ptr http_client, + std::shared_ptr storage_in, int run_pause_s, int event_number_limit) + : config(config_in), + 
http(std::move(http_client)), + storage(std::move(storage_in)), + run_pause_s_{run_pause_s}, + event_number_limit_{event_number_limit}, + cur_event_number_limit_{event_number_limit_} { + if (event_number_limit == 0) { + throw std::invalid_argument("Event number limit is set to 0 what leads to event accumulation in DB"); + } thread_ = std::thread(std::bind(&ReportQueue::run, this)); } @@ -17,7 +30,7 @@ ReportQueue::~ReportQueue() { cv_.notify_all(); thread_.join(); - LOG_DEBUG << "Flushing report queue"; + LOG_TRACE << "Flushing report queue"; flushQueue(); } @@ -28,22 +41,29 @@ void ReportQueue::run() { std::unique_lock lock(m_); while (!shutdown_) { flushQueue(); - cv_.wait_for(lock, std::chrono::seconds(10)); + cv_.wait_for(lock, std::chrono::seconds(run_pause_s_)); } } void ReportQueue::enqueue(std::unique_ptr event) { { std::lock_guard lock(m_); - report_queue_.push(std::move(event)); + storage->saveReportEvent(event->toJson()); } cv_.notify_all(); } void ReportQueue::flushQueue() { - while (!report_queue_.empty()) { - report_array.append(report_queue_.front()->toJson()); - report_queue_.pop(); + int64_t max_id = 0; + Json::Value report_array{Json::arrayValue}; + try { + storage->loadReportEvents(&report_array, &max_id, cur_event_number_limit_); + } catch (const SQLException& exc) { + LOG_ERROR << "Failed to read events from DB: " << exc.what(); + return; + } catch (const std::exception& exc) { + LOG_ERROR << "Unknown failure while reading events from DB: " << exc.what(); + return; } if (config.tls.server.empty()) { @@ -54,23 +74,44 @@ void ReportQueue::flushQueue() { if (!report_array.empty()) { HttpResponse response = http->post(config.tls.server + "/events", report_array); + + bool delete_events{response.isOk()}; // 404 implies the server does not support this feature. Nothing we can // do, just move along. - if (response.isOk() || response.http_status_code == 404) { - LOG_TRACE << "Server does not support event reports. 
Clearing report queue."; + if (response.http_status_code == 404) { + LOG_DEBUG << "Server does not support event reports. Clearing report queue."; + delete_events = true; + } else if (response.http_status_code == 413) { + if (report_array.size() > 1) { + // if 413 is received to posting of more than one event then try sending less events next time + cur_event_number_limit_ = report_array.size() > 2 ? report_array.size() / 2 : 1; + LOG_DEBUG << "Got 413 response to request that contains " << report_array.size() << " events. Will try to send " + << cur_event_number_limit_ << " events."; + } else { + // An event is too big to be accepted by the server, let's drop it + LOG_WARNING << "Dropping a report event " << report_array[0].get("id", "unknown") << " since the server `" + << config.tls.server << "` cannot digest it (413)."; + delete_events = true; + } + } else if (!response.isOk()) { + LOG_WARNING << "Failed to post update events: " << response.getStatusStr(); + } + if (delete_events) { report_array.clear(); + storage->deleteReportEvents(max_id); + cur_event_number_limit_ = event_number_limit_; } } } void ReportEvent::setEcu(const Uptane::EcuSerial& ecu) { custom["ecu"] = ecu.ToString(); } void ReportEvent::setCorrelationId(const std::string& correlation_id) { - if (correlation_id != "") { + if (!correlation_id.empty()) { custom["correlationId"] = correlation_id; } } -Json::Value ReportEvent::toJson() { +Json::Value ReportEvent::toJson() const { Json::Value out; out["id"] = id; diff --git a/src/libaktualizr/primary/reportqueue.h b/src/libaktualizr/primary/reportqueue.h index 0574f780c2..50a9677571 100644 --- a/src/libaktualizr/primary/reportqueue.h +++ b/src/libaktualizr/primary/reportqueue.h @@ -1,18 +1,20 @@ #ifndef REPORTQUEUE_H_ #define REPORTQUEUE_H_ +#include #include #include #include #include #include +#include // for move -#include +#include "libaktualizr/types.h" // for EcuSerial (ptr only), TimeStamp +#include "utilities/utils.h" // for Utils -#include 
"config/config.h" -#include "http/httpclient.h" -#include "logging/logging.h" -#include "uptane/tuf.h" +class Config; +class HttpInterface; +class INvStorage; class ReportEvent { public: @@ -22,7 +24,7 @@ class ReportEvent { Json::Value custom; TimeStamp timestamp; - Json::Value toJson(); + Json::Value toJson() const; protected: ReportEvent(std::string event_type, int event_version) @@ -34,27 +36,27 @@ class ReportEvent { class CampaignAcceptedReport : public ReportEvent { public: - CampaignAcceptedReport(const std::string& campaign_id); + explicit CampaignAcceptedReport(const std::string& campaign_id); }; class CampaignDeclinedReport : public ReportEvent { public: - CampaignDeclinedReport(const std::string& campaign_id); + explicit CampaignDeclinedReport(const std::string& campaign_id); }; class CampaignPostponedReport : public ReportEvent { public: - CampaignPostponedReport(const std::string& campaign_id); + explicit CampaignPostponedReport(const std::string& campaign_id); }; class DevicePausedReport : public ReportEvent { public: - DevicePausedReport(const std::string& correlation_id); + explicit DevicePausedReport(const std::string& correlation_id); }; class DeviceResumedReport : public ReportEvent { public: - DeviceResumedReport(const std::string& correlation_id); + explicit DeviceResumedReport(const std::string& correlation_id); }; class EcuDownloadStartedReport : public ReportEvent { @@ -84,8 +86,13 @@ class EcuInstallationCompletedReport : public ReportEvent { class ReportQueue { public: - ReportQueue(const Config& config_in, std::shared_ptr http_client); + ReportQueue(const Config& config_in, std::shared_ptr http_client, + std::shared_ptr storage_in, int run_pause_s = 10, int event_number_limit = -1); ~ReportQueue(); + ReportQueue(const ReportQueue&) = delete; + ReportQueue(ReportQueue&&) = delete; + ReportQueue& operator=(const ReportQueue&) = delete; + ReportQueue& operator=(ReportQueue&&) = delete; void run(); void enqueue(std::unique_ptr event); @@ 
-98,8 +105,11 @@ class ReportQueue { std::condition_variable cv_; std::mutex m_; std::queue> report_queue_; - Json::Value report_array{Json::arrayValue}; bool shutdown_{false}; + std::shared_ptr storage; + const int run_pause_s_; + const int event_number_limit_; + int cur_event_number_limit_; }; #endif // REPORTQUEUE_H_ diff --git a/src/libaktualizr/primary/reportqueue_test.cc b/src/libaktualizr/primary/reportqueue_test.cc index 6935940fb8..cab1d08a24 100644 --- a/src/libaktualizr/primary/reportqueue_test.cc +++ b/src/libaktualizr/primary/reportqueue_test.cc @@ -7,16 +7,27 @@ #include -#include "config/config.h" #include "httpfake.h" +#include "libaktualizr/config.h" #include "reportqueue.h" -#include "utilities/types.h" // TimeStamp +#include "storage/invstorage.h" +#include "storage/sqlstorage.h" #include "utilities/utils.h" class HttpFakeRq : public HttpFake { public: - HttpFakeRq(const boost::filesystem::path &test_dir_in, size_t expected_events) - : HttpFake(test_dir_in, ""), expected_events_(expected_events) {} + HttpFakeRq(const boost::filesystem::path &test_dir_in, size_t expected_events, int event_numb_limit = -1) + : HttpFake(test_dir_in, ""), + expected_events_(expected_events), + event_numb_limit_{event_numb_limit}, + last_request_expected_events_{expected_events_} { + if (event_numb_limit_ > 0) { + last_request_expected_events_ = expected_events_ % static_cast(event_numb_limit_); + if (last_request_expected_events_ == 0) { + last_request_expected_events_ = static_cast(event_numb_limit_); + } + } + } HttpResponse handle_event(const std::string &url, const Json::Value &data) override { (void)data; @@ -51,6 +62,45 @@ class HttpFakeRq : public HttpFake { } return HttpResponse("", 200, CURLE_OK, ""); } + } else if (url.find("reportqueue/StoreEvents") == 0) { + for (int i = 0; i < static_cast(data.size()); ++i) { + EXPECT_EQ(data[i]["eventType"]["id"], "EcuDownloadCompleted"); + EXPECT_EQ(data[i]["event"]["ecu"], "StoreEvents" + 
std::to_string(events_seen++)); + } + if (events_seen == expected_events_) { + expected_events_received.set_value(true); + } + return HttpResponse("", 200, CURLE_OK, ""); + } else if (url.find("reportqueue/EventNumberLimit") == 0) { + const auto recv_event_numb{data.size()}; + EXPECT_GT(recv_event_numb, 0); + events_seen += recv_event_numb; + EXPECT_LE(events_seen, expected_events_); + + if (events_seen < expected_events_) { + EXPECT_EQ(recv_event_numb, event_numb_limit_); + } else { + EXPECT_EQ(recv_event_numb, last_request_expected_events_); + expected_events_received.set_value(true); + } + + return HttpResponse("", 200, CURLE_OK, ""); + } else if (url.find("reportqueue/PayloadTooLarge") == 0) { + EXPECT_GT(data.size(), 0); + for (uint ii = 0; ii < data.size(); ++ii) { + if (data[ii]["id"] == "413") { + return HttpResponse("", 413, CURLE_OK, "Payload Too Large"); + } + if (data[ii]["id"] == "500" && bad_gateway_counter_ < data[ii]["err_numb"].asInt()) { + ++bad_gateway_counter_; + return HttpResponse("", 500, CURLE_OK, "Bad Gateway"); + } + } + events_seen += data.size(); + if (events_seen == expected_events_) { + expected_events_received.set_value(true); + } + return HttpResponse("", 200, CURLE_OK, ""); } LOG_ERROR << "Unexpected event: " << data; return HttpResponse("", 400, CURLE_OK, ""); @@ -59,6 +109,9 @@ class HttpFakeRq : public HttpFake { size_t events_seen{0}; size_t expected_events_; std::promise expected_events_received{}; + int event_numb_limit_; + size_t last_request_expected_events_; + int bad_gateway_counter_{0}; }; /* Test one event. 
*/ @@ -70,7 +123,8 @@ TEST(ReportQueue, SingleEvent) { size_t num_events = 1; auto http = std::make_shared(temp_dir.Path(), num_events); - ReportQueue report_queue(config, http); + auto sql_storage = std::make_shared(config.storage, false); + ReportQueue report_queue(config, http, sql_storage); report_queue.enqueue(std_::make_unique(Uptane::EcuSerial("SingleEvent"), "", true)); @@ -88,7 +142,8 @@ TEST(ReportQueue, MultipleEvents) { size_t num_events = 10; auto http = std::make_shared(temp_dir.Path(), num_events); - ReportQueue report_queue(config, http); + auto sql_storage = std::make_shared(config.storage, false); + ReportQueue report_queue(config, http, sql_storage); for (int i = 0; i < 10; ++i) { report_queue.enqueue(std_::make_unique( @@ -110,16 +165,116 @@ TEST(ReportQueue, FailureRecovery) { size_t num_events = 10; auto http = std::make_shared(temp_dir.Path(), num_events); - ReportQueue report_queue(config, http); + auto sql_storage = std::make_shared(config.storage, false); + ReportQueue report_queue(config, http, sql_storage); - for (int i = 0; i < 10; ++i) { + for (size_t i = 0; i < num_events; ++i) { report_queue.enqueue(std_::make_unique( Uptane::EcuSerial("FailureRecovery" + std::to_string(i)), "", true)); } - // Wait at most 30 seconds for the messages to get processed. + // Wait at most 20 seconds for the messages to get processed. + http->expected_events_received.get_future().wait_for(std::chrono::seconds(20)); + EXPECT_EQ(http->events_seen, num_events); +} + +/* Test persistent storage of unsent events in the database across + * ReportQueue instantiations. 
*/ +TEST(ReportQueue, StoreEvents) { + TemporaryDirectory temp_dir; + Config config; + config.storage.path = temp_dir.Path(); + config.tls.server = ""; + + auto sql_storage = std::make_shared(config.storage, false); + size_t num_events = 10; + auto check_sql = [sql_storage](size_t count) { + int64_t max_id = 0; + Json::Value report_array{Json::arrayValue}; + sql_storage->loadReportEvents(&report_array, &max_id); + EXPECT_EQ(max_id, count); + }; + + { + auto http = std::make_shared(temp_dir.Path(), num_events); + ReportQueue report_queue(config, http, sql_storage); + for (size_t i = 0; i < num_events; ++i) { + report_queue.enqueue(std_::make_unique( + Uptane::EcuSerial("StoreEvents" + std::to_string(i)), "", true)); + } + check_sql(num_events); + } + + config.tls.server = "reportqueue/StoreEvents"; + auto http = std::make_shared(temp_dir.Path(), num_events); + ReportQueue report_queue(config, http, sql_storage); + // Wait at most 20 seconds for the messages to get processed. http->expected_events_received.get_future().wait_for(std::chrono::seconds(20)); EXPECT_EQ(http->events_seen, num_events); + sleep(1); + check_sql(0); +} + +TEST(ReportQueue, LimitEventNumber) { + TemporaryDirectory temp_dir; + Config config; + config.storage.path = temp_dir.Path(); + config.tls.server = ""; + auto sql_storage = std::make_shared(config.storage, false); + + const std::vector> test_cases{ + {1, -1}, {1, 1}, {1, 2}, {10, -1}, {10, 1}, {10, 2}, {10, 3}, {10, 9}, {10, 10}, {10, 11}, + }; + for (const auto &tc : test_cases) { + const auto event_numb{std::get<0>(tc)}; + const auto event_numb_limit{std::get<1>(tc)}; + + Json::Value report_array{Json::arrayValue}; + for (uint ii = 0; ii < event_numb; ++ii) { + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "some ID", "eventType": "some Event"})")); + } + + config.tls.server = "reportqueue/EventNumberLimit"; + auto http = std::make_shared(temp_dir.Path(), event_numb, event_numb_limit); + ReportQueue report_queue(config, http, 
sql_storage, 0, event_numb_limit); + // Wait at most 20 seconds for the messages to get processed. + http->expected_events_received.get_future().wait_for(std::chrono::seconds(20)); + EXPECT_EQ(http->events_seen, event_numb); + } +} + +TEST(ReportQueue, PayloadTooLarge) { + TemporaryDirectory temp_dir; + Config config; + config.storage.path = temp_dir.Path(); + config.tls.server = ""; + auto sql_storage = std::make_shared(config.storage, false); + + const std::vector> test_cases{ + {1, -1}, {1, 1}, {1, 2}, {13, -1}, {13, 1}, {13, 2}, {13, 3}, {13, 12}, {13, 13}, {13, 14}, + }; + for (const auto &tc : test_cases) { + const auto valid_event_numb{std::get<0>(tc)}; + const auto event_numb_limit{std::get<1>(tc)}; + + // inject "Too Big Event" at the beginning, middle, and the end of update event queues + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "413", "eventType": "some Event"})")); + for (uint ii = 0; ii < valid_event_numb - 1; ++ii) { + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "some ID", "eventType": "some Event"})")); + if (ii == valid_event_numb / 2) { + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "413", "eventType": "some Event"})")); + } + } + // inject one "Bad Gateway" event, the server returns 500 twice and eventually it succeeds + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "500", "err_numb": 2})")); + sql_storage->saveReportEvent(Utils::parseJSON(R"({"id": "413", "eventType": "some Event"})")); + + config.tls.server = "reportqueue/PayloadTooLarge"; + auto http = std::make_shared(temp_dir.Path(), valid_event_numb, event_numb_limit); + ReportQueue report_queue(config, http, sql_storage, 0, event_numb_limit); + http->expected_events_received.get_future().wait_for(std::chrono::seconds(20)); + EXPECT_EQ(http->events_seen, valid_event_numb); + } } #ifndef __NO_MAIN__ diff --git a/src/libaktualizr/primary/reregistration_test.cc b/src/libaktualizr/primary/reregistration_test.cc new file mode 100644 index 
0000000000..c822a92f57 --- /dev/null +++ b/src/libaktualizr/primary/reregistration_test.cc @@ -0,0 +1,213 @@ +#include + +#include +#include + +#include "httpfake.h" +#include "libaktualizr/config.h" +#include "uptane_test_common.h" +#include "virtualsecondary.h" + +boost::filesystem::path fake_meta_dir; + +class HttpFakeRegistration : public HttpFake { + public: + HttpFakeRegistration(const boost::filesystem::path& test_dir_in, const boost::filesystem::path& meta_dir_in) + : HttpFake(test_dir_in, "noupdates", meta_dir_in) {} + + HttpResponse post(const std::string& url, const Json::Value& data) override { + if (url.find("/devices") != std::string::npos) { + device_registration_count++; + auto this_device_id = data["deviceId"].asString(); + if (ecu_registration_count <= 1) { + device_id = this_device_id; + } else { + EXPECT_EQ(device_id, this_device_id) << "deviceId should change during provisioning"; + } + } + if (url.find("/director/ecus") != std::string::npos) { + ecu_registration_count++; + EXPECT_EQ(data["primary_ecu_serial"].asString(), "CA:FE:A6:D2:84:9D"); + EXPECT_EQ(data["ecus"][0]["ecu_serial"].asString(), "CA:FE:A6:D2:84:9D"); + EXPECT_EQ(data["ecus"][0]["hardware_identifier"].asString(), "primary_hw"); + if (ecu_registration_count == 1) { + primary_ecu_info = data["ecus"][0]; + } else { + EXPECT_EQ(primary_ecu_info, data["ecus"][0]) << "Information about primary ECU shouldn't change"; + } + } + + return HttpFake::post(url, data); + } + + unsigned int ecu_registration_count{0}; + unsigned int device_registration_count{0}; + Json::Value primary_ecu_info; + std::string device_id; +}; + +/* + * Add a Secondary via API, register the ECUs, then add another, and re-register. 
+ */ +TEST(Aktualizr, AddSecondary) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "ecuserial3", "secondary_ecu_serial"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + + ecu_config.ecu_serial = "ecuserial4"; + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + expected_ecus.push_back(ecu_config.ecu_serial); + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 2); +} + +/* + * Add a Secondary via API, register the ECUs, remove one, and re-register. 
+ */ +TEST(Aktualizr, RemoveSecondary) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "ecuserial3", "secondary_ecu_serial"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "secondary_ecu_serial"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 2); + } +} + +/* + * Add a Secondary via API, register the ECUs, replace one, and re-register. 
+ */ +TEST(Aktualizr, ReplaceSecondary) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "ecuserial3", "secondary_ecu_serial"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + ecu_config.ecu_serial = "ecuserial4"; + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + + std::vector expected_ecus = {"CA:FE:A6:D2:84:9D", "ecuserial4", "secondary_ecu_serial"}; + UptaneTestCommon::verifyEcus(temp_dir, expected_ecus); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 2); + } +} + +/** + * Restarting Aktualizr without changing the secondaries should not result in it getting re-registered + */ +TEST(Aktualizr, RestartNoRegisterSecondaries) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + 
EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } + + { + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::altVirtualConfiguration(temp_dir.Path()); + aktualizr.AddSecondary(std::make_shared(ecu_config)); + aktualizr.Initialize(); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } +} + +/** + * Restarting Aktualizr should not result in it getting re-registered if it has no secondaries. + * This is similar to RestartNoRegisterSecondaries, but with zero secondaries. + */ +TEST(Aktualizr, RestartNoRegisterPrimaryOnly) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path(), fake_meta_dir); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + + { + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } + + { + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + EXPECT_EQ(http->device_registration_count, 1); + EXPECT_EQ(http->ecu_registration_count, 1); + } +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + + TemporaryDirectory tmp_dir; + fake_meta_dir = tmp_dir.Path(); + MetaFake meta_fake(fake_meta_dir); + + return RUN_ALL_TESTS(); +} +#endif + +// vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/libaktualizr/primary/secondary_config.h b/src/libaktualizr/primary/secondary_config.h index f92457d4c8..b04a2194ea 100644 --- a/src/libaktualizr/primary/secondary_config.h +++ b/src/libaktualizr/primary/secondary_config.h @@ -1,16 +1,24 @@ 
#ifndef PRIMARY_SECONDARY_CONFIG_H_ #define PRIMARY_SECONDARY_CONFIG_H_ +#include + namespace Primary { class SecondaryConfig { public: - explicit SecondaryConfig(const char* type) : type_(type) {} - virtual const char* type() const { return type_; } + explicit SecondaryConfig(std::string type) : type_(std::move(type)) {} virtual ~SecondaryConfig() = default; + virtual std::string type() const { return type_; } + + protected: + SecondaryConfig(const SecondaryConfig &) = default; + SecondaryConfig(SecondaryConfig &&) = default; + SecondaryConfig &operator=(const SecondaryConfig &) = default; + SecondaryConfig &operator=(SecondaryConfig &&) = default; private: - const char* const type_; + std::string type_; }; } // namespace Primary diff --git a/src/libaktualizr/primary/secondary_provider.cc b/src/libaktualizr/primary/secondary_provider.cc new file mode 100644 index 0000000000..132ee56dae --- /dev/null +++ b/src/libaktualizr/primary/secondary_provider.cc @@ -0,0 +1,104 @@ +#include "libaktualizr/secondary_provider.h" + +#include + +#include "logging/logging.h" +#include "storage/invstorage.h" +#include "uptane/tuf.h" +#include "utilities/utils.h" + +bool SecondaryProvider::getMetadata(Uptane::MetaBundle* meta_bundle, const Uptane::Target& target) const { + if (!getDirectorMetadata(meta_bundle)) { + return false; + } + if (!getImageRepoMetadata(meta_bundle, target)) { + return false; + } + return true; +} + +bool SecondaryProvider::getDirectorMetadata(Uptane::MetaBundle* meta_bundle) const { + std::string root; + std::string targets; + + if (!storage_->loadLatestRoot(&root, Uptane::RepositoryType::Director())) { + LOG_ERROR << "No Director Root metadata to send"; + return false; + } + if (!storage_->loadNonRoot(&targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())) { + LOG_ERROR << "No Director Targets metadata to send"; + return false; + } + + meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), root); + 
meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), targets); + return true; +} + +bool SecondaryProvider::getImageRepoMetadata(Uptane::MetaBundle* meta_bundle, const Uptane::Target& target) const { + std::string root; + std::string timestamp; + std::string snapshot; + std::string targets; + + if (!storage_->loadLatestRoot(&root, Uptane::RepositoryType::Image())) { + LOG_ERROR << "No Image repo Root metadata to send"; + return false; + } + if (!storage_->loadNonRoot(×tamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())) { + LOG_ERROR << "No Image repo Timestamp metadata to send"; + return false; + } + if (!storage_->loadNonRoot(&snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())) { + LOG_ERROR << "No Image repo Snapshot metadata to send"; + return false; + } + if (!storage_->loadNonRoot(&targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())) { + LOG_ERROR << "No Image repo Targets metadata to send"; + return false; + } + + meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), root); + meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), timestamp); + meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), snapshot); + meta_bundle->emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), targets); + + // TODO: Support delegations for Secondaries. This is the purpose of providing + // the desired Target. 
+ (void)target; + + return true; +} + +std::string SecondaryProvider::getTreehubCredentials() const { + if (config_.tls.pkey_source != CryptoSource::kFile || config_.tls.cert_source != CryptoSource::kFile || + config_.tls.ca_source != CryptoSource::kFile) { + LOG_ERROR << "Cannot send OSTree update to a Secondary when not using file as credential sources"; + return ""; + } + std::string ca; + std::string cert; + std::string pkey; + if (!storage_->loadTlsCreds(&ca, &cert, &pkey)) { + LOG_ERROR << "Could not load TLS credentials from storage"; + return ""; + } + + const std::string treehub_url = config_.pacman.ostree_server; + std::map archive_map = { + {"ca.pem", ca}, {"client.pem", cert}, {"pkey.pem", pkey}, {"server.url", treehub_url}}; + + try { + std::stringstream as; + Utils::writeArchive(archive_map, as); + + return as.str(); + } catch (std::runtime_error& exc) { + LOG_ERROR << "Could not create credentials archive: " << exc.what(); + return ""; + } +} + +std::ifstream SecondaryProvider::getTargetFileHandle(const Uptane::Target& target) const { + return package_manager_->openTargetFile(target); +} diff --git a/src/libaktualizr/primary/secondary_provider_builder.h b/src/libaktualizr/primary/secondary_provider_builder.h new file mode 100644 index 0000000000..f3f7ce533b --- /dev/null +++ b/src/libaktualizr/primary/secondary_provider_builder.h @@ -0,0 +1,24 @@ +#ifndef UPTANE_SECONDARY_PROVIDER_BUILDER_H +#define UPTANE_SECONDARY_PROVIDER_BUILDER_H + +#include + +#include "libaktualizr/secondary_provider.h" + +class SecondaryProviderBuilder { + public: + static std::shared_ptr Build( + Config &config, const std::shared_ptr &storage, + const std::shared_ptr &package_manager) { + return std::make_shared(SecondaryProvider(config, storage, package_manager)); + } + ~SecondaryProviderBuilder() = default; + SecondaryProviderBuilder(const SecondaryProviderBuilder &) = delete; + SecondaryProviderBuilder(SecondaryProviderBuilder &&) = delete; + SecondaryProviderBuilder 
&operator=(const SecondaryProviderBuilder &) = delete; + SecondaryProviderBuilder &operator=(SecondaryProviderBuilder &&) = delete; + + private: + SecondaryProviderBuilder() = default; +}; +#endif // UPTANE_SECONDARY_PROVIDER_BUILDER_H diff --git a/src/libaktualizr/primary/sotauptaneclient.cc b/src/libaktualizr/primary/sotauptaneclient.cc index 3e0586d2c1..0eac3e1f22 100644 --- a/src/libaktualizr/primary/sotauptaneclient.cc +++ b/src/libaktualizr/primary/sotauptaneclient.cc @@ -1,19 +1,16 @@ -#include "sotauptaneclient.h" +#include "primary/sotauptaneclient.h" #include -#include +#include #include #include -#include "campaign/campaign.h" #include "crypto/crypto.h" #include "crypto/keymanager.h" -#include "initializer.h" +#include "libaktualizr/campaign.h" #include "logging/logging.h" -#include "package_manager/packagemanagerfactory.h" +#include "provisioner.h" #include "uptane/exceptions.h" - -#include "utilities/fault_injection.h" #include "utilities/utils.h" static void report_progress_cb(event::Channel *channel, const Uptane::Target &target, const std::string &description, @@ -25,69 +22,72 @@ static void report_progress_cb(event::Channel *channel, const Uptane::Target &ta (*channel)(event); } -std::shared_ptr SotaUptaneClient::newDefaultClient( - Config &config_in, std::shared_ptr storage_in, std::shared_ptr events_channel_in) { - std::shared_ptr http_client_in = std::make_shared(); - std::shared_ptr bootloader_in = std::make_shared(config_in.bootloader, *storage_in); - std::shared_ptr report_queue_in = std::make_shared(config_in, http_client_in); - - return std::make_shared(config_in, storage_in, http_client_in, bootloader_in, report_queue_in, - events_channel_in); -} - -SotaUptaneClient::SotaUptaneClient(Config &config_in, const std::shared_ptr &storage_in, - std::shared_ptr http_client, - std::shared_ptr bootloader_in, - std::shared_ptr report_queue_in, +/** + * A utility class to compare targets between Image and Director repositories. 
+ * The definition of 'sameness' is in Target::MatchTarget(). + */ +class TargetCompare { + public: + explicit TargetCompare(const Uptane::Target &target_in) : target(target_in) {} + bool operator()(const Uptane::Target &in) const { return (in.MatchTarget(target)); } + + private: + const Uptane::Target ⌖ +}; + +SotaUptaneClient::SotaUptaneClient(Config &config_in, std::shared_ptr storage_in, + std::shared_ptr http_in, std::shared_ptr events_channel_in) : config(config_in), - uptane_manifest(config, storage_in), - storage(storage_in), - http(std::move(http_client)), - bootloader(std::move(bootloader_in)), - report_queue(std::move(report_queue_in)), - events_channel(std::move(events_channel_in)) { - uptane_fetcher = std::make_shared(config, http); - - // consider boot successful as soon as we started, missing internet connection or connection to secondaries are not - // proper reasons to roll back - package_manager_ = PackageManagerFactory::makePackageManager(config.pacman, storage, bootloader, http); - if (package_manager_->imageUpdated()) { - bootloader->setBootOK(); - } + storage(std::move(storage_in)), + http(std::move(http_in)), + package_manager_(PackageManagerFactory::makePackageManager(config.pacman, config.bootloader, storage, http)), + key_manager_(std::make_shared(storage, config.keymanagerConfig())), + uptane_fetcher(new Uptane::Fetcher(config, http)), + events_channel(std::move(events_channel_in)), + provisioner_(config.provision, storage, http, key_manager_, secondaries) { + report_queue = std_::make_unique(config, http, storage); + secondary_provider_ = SecondaryProviderBuilder::Build(config, storage, package_manager_); } -SotaUptaneClient::~SotaUptaneClient() { conn.disconnect(); } +void SotaUptaneClient::addSecondary(const std::shared_ptr &sec) { + Uptane::EcuSerial serial = sec->getSerial(); -void SotaUptaneClient::addNewSecondary(const std::shared_ptr &sec) { - if (storage->loadEcuRegistered()) { - EcuSerials serials; - 
storage->loadEcuSerials(&serials); - SerialCompare secondary_comp(sec->getSerial()); - if (std::find_if(serials.cbegin(), serials.cend(), secondary_comp) == serials.cend()) { - throw std::logic_error("Add new secondaries for provisioned device is not implemented yet"); - } - } - addSecondary(sec); -} - -void SotaUptaneClient::addSecondary(const std::shared_ptr &sec) { - const Uptane::EcuSerial sec_serial = sec->getSerial(); - const Uptane::HardwareIdentifier sec_hw_id = sec->getHwId(); - std::map>::const_iterator map_it = - secondaries.find(sec_serial); + const auto map_it = secondaries.find(serial); if (map_it != secondaries.end()) { - throw std::runtime_error(std::string("Multiple secondaries found with the same serial: ") + sec_serial.ToString()); + throw std::runtime_error(std::string("Multiple Secondaries found with the same serial: ") + serial.ToString()); } - secondaries.insert(std::make_pair(sec_serial, sec)); - hw_ids.insert(std::make_pair(sec_serial, sec_hw_id)); + + secondaries.emplace(serial, sec); + sec->init(secondary_provider_); + provisioner_.SecondariesWereChanged(); } -bool SotaUptaneClient::isInstalledOnPrimary(const Uptane::Target &target) { - if (target.ecus().find(uptane_manifest.getPrimaryEcuSerial()) != target.ecus().end()) { - return target.MatchTarget(package_manager_->getCurrent()); +bool SotaUptaneClient::attemptProvision() { + bool already_provisioned = provisioner_.CurrentState() == Provisioner::State::kOk; + if (already_provisioned) { + return true; } - return false; + if (!provisioner_.Attempt()) { + return false; + } + // If we got here, provisioning occurred in this run, dump some debugging information + LOG_INFO << "Primary ECU serial: " << provisioner_.PrimaryEcuSerial() + << " with hardware ID: " << provisioner_.PrimaryHardwareIdentifier(); + + LOG_INFO << "Device ID: " << provisioner_.DeviceId(); + LOG_INFO << "Device Gateway URL: " << config.tls.server; + + std::string subject; + std::string issuer; + std::string not_before; + 
std::string not_after; + key_manager_->getCertInfo(&subject, &issuer, ¬_before, ¬_after); + LOG_INFO << "Certificate subject: " << subject; + LOG_INFO << "Certificate issuer: " << issuer; + LOG_INFO << "Certificate valid from: " << not_before << " until: " << not_after; + LOG_DEBUG << "... provisioned OK"; + return true; } std::vector SotaUptaneClient::findForEcu(const std::vector &targets, @@ -106,63 +106,68 @@ data::InstallationResult SotaUptaneClient::PackageInstall(const Uptane::Target & try { return package_manager_->install(target); } catch (std::exception &ex) { + LOG_ERROR << "Installation failed: " << ex.what(); return data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, ex.what()); } } void SotaUptaneClient::finalizeAfterReboot() { - if (!bootloader->rebootDetected()) { - // nothing to do + // TODO: consider bringing checkAndUpdatePendingSecondaries and the following functionality + // to the common denominator + if (!hasPendingUpdates()) { + LOG_DEBUG << "No pending updates, continuing with initialization"; return; } - LOG_INFO << "Device has been rebooted after an update"; + LOG_INFO << "Checking for a pending update to apply for Primary ECU"; - std::vector updates; - unsigned int ecus_count = 0; - if (uptaneOfflineIteration(&updates, &ecus_count)) { - const Uptane::EcuSerial &ecu_serial = uptane_manifest.getPrimaryEcuSerial(); + const Uptane::EcuSerial primary_ecu_serial = primaryEcuSerial(); + boost::optional pending_target; + storage->loadInstalledVersions(primary_ecu_serial.ToString(), nullptr, &pending_target); - std::vector installed_versions; - boost::optional pending_target; - storage->loadInstalledVersions(ecu_serial.ToString(), nullptr, &pending_target); + if (!pending_target) { + LOG_ERROR << "No pending update for Primary ECU found, continuing with initialization"; + return; + } - if (!!pending_target) { - const std::string correlation_id = pending_target->correlation_id(); + LOG_INFO << "Pending update for Primary ECU was 
found, trying to apply it..."; - data::InstallationResult install_res = package_manager_->finalizeInstall(*pending_target); - storage->saveEcuInstallationResult(ecu_serial, install_res); - if (install_res.success) { - storage->saveInstalledVersion(ecu_serial.ToString(), *pending_target, InstalledVersionUpdateMode::kCurrent); - report_queue->enqueue(std_::make_unique(ecu_serial, correlation_id, true)); - } else { - // finalize failed - // unset pending flag so that the rest of the uptane process can - // go forward again - storage->saveInstalledVersion(ecu_serial.ToString(), *pending_target, InstalledVersionUpdateMode::kNone); - report_queue->enqueue(std_::make_unique(ecu_serial, correlation_id, false)); - director_repo.dropTargets(*storage); // fix for OTA-2587, listen to backend again after end of install - } + data::InstallationResult install_res = package_manager_->finalizeInstall(*pending_target); - computeDeviceInstallationResult(nullptr, correlation_id); - putManifestSimple(); - } else { - // nothing found on primary - LOG_ERROR << "Expected reboot after update on primary but no update found"; - } + if (install_res.result_code == data::ResultCode::Numeric::kNeedCompletion) { + LOG_INFO << "Pending update for Primary ECU was not applied because reboot was not detected, " + "continuing with initialization"; + return; + } + + storage->saveEcuInstallationResult(primary_ecu_serial, install_res); + + const std::string correlation_id = pending_target->correlation_id(); + if (install_res.success) { + storage->saveInstalledVersion(primary_ecu_serial.ToString(), *pending_target, InstalledVersionUpdateMode::kCurrent); + + report_queue->enqueue(std_::make_unique(primary_ecu_serial, correlation_id, true)); } else { - LOG_ERROR << "Invalid Uptane metadata in storage."; + // finalize failed, unset pending flag so that the rest of the Uptane process can go forward again + storage->saveInstalledVersion(primary_ecu_serial.ToString(), *pending_target, 
InstalledVersionUpdateMode::kNone); + report_queue->enqueue(std_::make_unique(primary_ecu_serial, correlation_id, false)); } - bootloader->rebootFlagClear(); + director_repo.dropTargets(*storage); // fix for OTA-2587, listen to backend again after end of install + + data::InstallationResult ir; + std::string raw_report; + computeDeviceInstallationResult(&ir, &raw_report); + storage->storeDeviceInstallationResult(ir, raw_report, correlation_id); + putManifestSimple(); } data::InstallationResult SotaUptaneClient::PackageInstallSetResult(const Uptane::Target &target) { data::InstallationResult result; - Uptane::EcuSerial ecu_serial = uptane_manifest.getPrimaryEcuSerial(); + Uptane::EcuSerial ecu_serial = primaryEcuSerial(); // This is to recover more gracefully if the install process was interrupted - // but ends up booting the new version anyway (e.g: ostree finished + // but ends up booting the new version anyway (e.g: OSTree finished // deploying but the device restarted before the final saveInstalledVersion // was called). // By storing the version in the table (as uninstalled), we can still pick @@ -176,67 +181,178 @@ data::InstallationResult SotaUptaneClient::PackageInstallSetResult(const Uptane: // simple case: update already completed storage->saveInstalledVersion(ecu_serial.ToString(), target, InstalledVersionUpdateMode::kCurrent); } else if (result.result_code.num_code == data::ResultCode::Numeric::kNeedCompletion) { - // ostree case: need reboot + // OSTree case: need reboot storage->saveInstalledVersion(ecu_serial.ToString(), target, InstalledVersionUpdateMode::kPending); } storage->saveEcuInstallationResult(ecu_serial, result); return result; } +/* Hardware info is treated differently than the other device data. The default + * info (supplied via lshw) is only sent once and never again, even if it + * changes. (Unfortunately, it can change often due to CPU frequency scaling.) 
+ * However, users can provide custom info via the API, and that will be sent if + * it has changed. */ void SotaUptaneClient::reportHwInfo() { - Json::Value hw_info = Utils::getHardwareInfo(); - if (!hw_info.empty()) { - http->put(config.tls.server + "/core/system_info", hw_info); + Json::Value hw_info; + std::string stored_hash; + storage->loadDeviceDataHash("hardware_info", &stored_hash); + + if (custom_hardware_info_.empty()) { + if (!stored_hash.empty()) { + LOG_TRACE << "Not reporting default hardware information because it has already been reported"; + return; + } + hw_info = Utils::getHardwareInfo(); + if (hw_info.empty()) { + LOG_WARNING << "Unable to fetch hardware information from host system."; + return; + } } else { - LOG_WARNING << "Unable to fetch hardware information from host system."; + hw_info = custom_hardware_info_; + } + + const Hash new_hash = Hash::generate(Hash::Type::kSha256, Utils::jsonToCanonicalStr(hw_info)); + if (new_hash != Hash(Hash::Type::kSha256, stored_hash)) { + if (custom_hardware_info_.empty()) { + LOG_DEBUG << "Reporting default hardware information"; + } else { + LOG_DEBUG << "Reporting custom hardware information"; + } + const HttpResponse response = http->put(config.tls.server + "/system_info", hw_info); + if (response.isOk()) { + storage->storeDeviceDataHash("hardware_info", new_hash.HashString()); + } + } else { + LOG_TRACE << "Not reporting hardware information because it has not changed"; } } void SotaUptaneClient::reportInstalledPackages() { - http->put(config.tls.server + "/core/installed", package_manager_->getInstalledPackages()); + const Json::Value packages = package_manager_->getInstalledPackages(); + const Hash new_hash = Hash::generate(Hash::Type::kSha256, Utils::jsonToCanonicalStr(packages)); + std::string stored_hash; + if (!(storage->loadDeviceDataHash("installed_packages", &stored_hash) && + new_hash == Hash(Hash::Type::kSha256, stored_hash))) { + LOG_DEBUG << "Reporting installed packages"; + const 
HttpResponse response = http->put(config.tls.server + "/core/installed", packages); + if (response.isOk()) { + storage->storeDeviceDataHash("installed_packages", new_hash.HashString()); + } + } else { + LOG_TRACE << "Not reporting installed packages because they have not changed"; + } } void SotaUptaneClient::reportNetworkInfo() { - if (config.telemetry.report_network) { + if (!config.telemetry.report_network) { + LOG_TRACE << "Not reporting network information because telemetry is disabled"; + return; + } + + Json::Value network_info; + try { + network_info = Utils::getNetworkInfo(); + } catch (const std::exception &ex) { + LOG_ERROR << "Failed to get network info: " << ex.what(); + return; + } + const Hash new_hash = Hash::generate(Hash::Type::kSha256, Utils::jsonToCanonicalStr(network_info)); + std::string stored_hash; + if (!(storage->loadDeviceDataHash("network_info", &stored_hash) && + new_hash == Hash(Hash::Type::kSha256, stored_hash))) { LOG_DEBUG << "Reporting network information"; - Json::Value network_info = Utils::getNetworkInfo(); - if (network_info != last_network_info_reported) { - HttpResponse response = http->put(config.tls.server + "/system_info/network", network_info); - if (response.isOk()) { - last_network_info_reported = network_info; - } + const HttpResponse response = http->put(config.tls.server + "/system_info/network", network_info); + if (response.isOk()) { + storage->storeDeviceDataHash("network_info", new_hash.HashString()); } + } else { + LOG_TRACE << "Not reporting network information because it has not changed"; + } +} + +void SotaUptaneClient::reportAktualizrConfiguration() { + if (!config.telemetry.report_config) { + LOG_TRACE << "Not reporting libaktualizr configuration because telemetry is disabled"; + return; + } + std::stringstream conf_ss; + config.writeToStream(conf_ss); + const std::string conf_str = conf_ss.str(); + const Hash new_hash = Hash::generate(Hash::Type::kSha256, conf_str); + std::string stored_hash; + if 
(!(storage->loadDeviceDataHash("configuration", &stored_hash) && + new_hash == Hash(Hash::Type::kSha256, stored_hash))) { + LOG_DEBUG << "Reporting libaktualizr configuration"; + const HttpResponse response = http->post(config.tls.server + "/system_info/config", "application/toml", conf_str); + if (response.isOk()) { + storage->storeDeviceDataHash("configuration", new_hash.HashString()); + } } else { - LOG_DEBUG << "Not reporting network information because telemetry is disabled"; + LOG_TRACE << "Not reporting libaktualizr configuration because it has not changed"; } } Json::Value SotaUptaneClient::AssembleManifest() { Json::Value manifest; // signed top-level - Uptane::EcuSerial primary_ecu_serial = uptane_manifest.getPrimaryEcuSerial(); + Uptane::EcuSerial primary_ecu_serial = primaryEcuSerial(); manifest["primary_ecu_serial"] = primary_ecu_serial.ToString(); - // first part: report current version/state of all ecus + // first part: report current version/state of all ECUs Json::Value version_manifest; - Json::Value primary_ecu_version = package_manager_->getManifest(primary_ecu_serial); - version_manifest[primary_ecu_serial.ToString()] = uptane_manifest.signManifest(primary_ecu_version); + Json::Value primary_manifest = uptane_manifest->assembleManifest(package_manager_->getCurrent()); + std::vector> ecu_cnt; + std::string report_counter; + if (!storage->loadEcuReportCounter(&ecu_cnt) || ecu_cnt.empty()) { + LOG_ERROR << "No ECU version report counter, please check the database!"; + // TODO: consider not sending manifest at all in this case, or maybe retry + } else { + report_counter = std::to_string(ecu_cnt[0].second + 1); + storage->saveEcuReportCounter(ecu_cnt[0].first, ecu_cnt[0].second + 1); + } + version_manifest[primary_ecu_serial.ToString()] = uptane_manifest->sign(primary_manifest, report_counter); for (auto it = secondaries.begin(); it != secondaries.end(); it++) { - Json::Value secmanifest = it->second->getManifest(); - if 
(secmanifest.isMember("signatures") && secmanifest.isMember("signed")) { - const auto public_key = it->second->getPublicKey(); - const std::string canonical = Json::FastWriter().write(secmanifest["signed"]); - const bool verified = public_key.VerifySignature(secmanifest["signatures"][0]["sig"].asString(), canonical); - - if (verified) { - version_manifest[it->first.ToString()] = secmanifest; + const Uptane::EcuSerial &ecu_serial = it->first; + Uptane::Manifest secmanifest; + try { + secmanifest = it->second->getManifest(); + } catch (const std::exception &ex) { + // Not critical; it might just be temporarily offline. + LOG_DEBUG << "Failed to get manifest from Secondary with serial " << ecu_serial << ": " << ex.what(); + } + + bool from_cache = false; + if (secmanifest.empty()) { + // Could not get the Secondary manifest directly, so just use a cached value. + std::string cached; + if (storage->loadCachedEcuManifest(ecu_serial, &cached)) { + LOG_WARNING << "Could not reach Secondary " << ecu_serial << ", sending a cached version of its manifest"; + secmanifest = Utils::parseJSON(cached); + from_cache = true; } else { - LOG_ERROR << "Secondary manifest verification failed, manifest: " << secmanifest; + LOG_ERROR << "Failed to get a valid manifest from Secondary with serial " << ecu_serial << " or from cache!"; + continue; + } + } + + bool verified = false; + try { + verified = secmanifest.verifySignature(it->second->getPublicKey()); + } catch (const std::exception &ex) { + LOG_ERROR << "Failed to get public key from Secondary with serial " << ecu_serial << ": " << ex.what(); + } + if (verified) { + version_manifest[ecu_serial.ToString()] = secmanifest; + if (!from_cache) { + storage->storeCachedEcuManifest(ecu_serial, Utils::jsonToCanonicalStr(secmanifest)); } } else { - LOG_ERROR << "Secondary manifest is corrupted or not signed, manifest: " << secmanifest; + // TODO(OTA-4305): send a corresponding event/report in this case + LOG_ERROR << "Invalid manifest or 
signature reported by Secondary: " + << " serial: " << ecu_serial << " manifest: " << secmanifest; } } manifest["ecu_version_manifests"] = version_manifest; @@ -249,6 +365,10 @@ Json::Value SotaUptaneClient::AssembleManifest() { std::string correlation_id; bool has_results = storage->loadDeviceInstallationResult(&dev_result, &raw_report, &correlation_id); if (has_results) { + if (!(dev_result.isSuccess() || dev_result.needCompletion())) { + director_repo.dropTargets(*storage); // fix for OTA-2587, listen to backend again after end of install + } + installation_report["result"] = dev_result.toJson(); installation_report["raw_report"] = raw_report; installation_report["correlation_id"] = correlation_id; @@ -276,68 +396,74 @@ Json::Value SotaUptaneClient::AssembleManifest() { return manifest; } -bool SotaUptaneClient::hasPendingUpdates() { return storage->hasPendingInstall(); } +bool SotaUptaneClient::hasPendingUpdates() const { return storage->hasPendingInstall(); } void SotaUptaneClient::initialize() { - LOG_DEBUG << "Checking if device is provisioned..."; - KeyManager keys(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, secondaries); + provisioner_.Prepare(); - if (!initializer.isSuccessful()) { - throw std::runtime_error("Fatal error during provisioning or ECU device registration."); - } + uptane_manifest = std::make_shared(key_manager_, provisioner_.PrimaryEcuSerial()); - EcuSerials serials; - if (!storage->loadEcuSerials(&serials) || serials.size() == 0) { - throw std::runtime_error("Unable to load ECU serials after device registration."); - } + finalizeAfterReboot(); - uptane_manifest.setPrimaryEcuSerialHwId(serials[0]); - hw_ids.insert(serials[0]); + attemptProvision(); +} - verifySecondaries(); - LOG_DEBUG << "... 
provisioned OK"; +void SotaUptaneClient::requiresProvision() { + if (!attemptProvision()) { + throw ProvisioningFailed(); + } +} - finalizeAfterReboot(); +void SotaUptaneClient::requiresAlreadyProvisioned() { + if (provisioner_.CurrentState() != Provisioner::State::kOk) { + throw NotProvisionedYet(); + } } -bool SotaUptaneClient::updateDirectorMeta() { - if (!director_repo.updateMeta(*storage, *uptane_fetcher)) { - last_exception = director_repo.getLastException(); - return false; +void SotaUptaneClient::updateDirectorMeta() { + requiresProvision(); + try { + director_repo.updateMeta(*storage, *uptane_fetcher); + } catch (const std::exception &e) { + LOG_ERROR << "Director metadata update failed: " << e.what(); + throw; } - return true; } -bool SotaUptaneClient::updateImagesMeta() { - if (!images_repo.updateMeta(*storage, *uptane_fetcher)) { - last_exception = images_repo.getLastException(); - return false; +void SotaUptaneClient::updateImageMeta() { + requiresProvision(); + try { + image_repo.updateMeta(*storage, *uptane_fetcher); + } catch (const std::exception &e) { + LOG_ERROR << "Failed to update Image repo metadata: " << e.what(); + throw; } - return true; } -bool SotaUptaneClient::checkDirectorMetaOffline() { - if (!director_repo.checkMetaOffline(*storage)) { - last_exception = director_repo.getLastException(); - return false; +void SotaUptaneClient::checkDirectorMetaOffline() { + requiresAlreadyProvisioned(); + try { + director_repo.checkMetaOffline(*storage); + } catch (const std::exception &e) { + LOG_ERROR << "Failed to check Director metadata: " << e.what(); + throw; } - return true; } -bool SotaUptaneClient::checkImagesMetaOffline() { - if (!images_repo.checkMetaOffline(*storage)) { - last_exception = images_repo.getLastException(); - return false; +void SotaUptaneClient::checkImageMetaOffline() { + requiresAlreadyProvisioned(); + try { + image_repo.checkMetaOffline(*storage); + } catch (const std::exception &e) { + LOG_ERROR << "Failed to check Image 
repo metadata: " << e.what(); } - return true; } void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult *result, - const std::string &correlation_id) { + std::string *raw_installation_report) { data::InstallationResult device_installation_result = data::InstallationResult(data::ResultCode::Numeric::kOk, "Device has been successfully installed"); - std::string raw_installation_report = "Installation succesful"; + std::string raw_ir = "Installation succesful"; do { std::vector> ecu_results; @@ -345,9 +471,8 @@ void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult if (!storage->loadEcuInstallationResults(&ecu_results)) { // failed to load ECUs' installation result device_installation_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, - "Unable to get installation results from ecus"); - raw_installation_report = "Failed to load ECUs' installation result"; - + "Unable to get installation results from ECUs"); + raw_ir = "Failed to load ECU installation results"; break; } @@ -357,13 +482,14 @@ void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult auto ecu_serial = r.first; auto installation_res = r.second; - if (hw_ids.find(ecu_serial) == hw_ids.end()) { + auto hw_id = getEcuHwId(ecu_serial); + + if (!hw_id) { // couldn't find any ECU with the given serial/ID device_installation_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, - "Unable to get installation results from ecus"); - - raw_installation_report = "Couldn't find any ECU with the given serial: " + ecu_serial.ToString(); + "Unable to get installation results from ECUs"); + raw_ir = "Failed to find an ECU with the given serial: " + ecu_serial.ToString(); break; } @@ -372,16 +498,15 @@ void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult device_installation_result = data::InstallationResult(data::ResultCode::Numeric::kNeedCompletion, "ECU needs 
completion/finalization to be installed: " + ecu_serial.ToString()); - raw_installation_report = "ECU needs completion/finalization to be installed: " + ecu_serial.ToString(); - + raw_ir = "ECU needs completion/finalization to be installed: " + ecu_serial.ToString(); break; } // format: // ecu1_hwid:failure1|ecu2_hwid:failure2 if (!installation_res.isSuccess()) { - std::string ecu_code_str = hw_ids.at(ecu_serial).ToString() + ":" + installation_res.result_code.toString(); - result_code_err_str += (result_code_err_str != "" ? "|" : "") + ecu_code_str; + const std::string ecu_code_str = (*hw_id).ToString() + ":" + installation_res.result_code.ToString(); + result_code_err_str += (!result_code_err_str.empty() ? "|" : "") + ecu_code_str; } } @@ -389,8 +514,8 @@ void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult // installation on at least one of the ECUs has failed device_installation_result = data::InstallationResult(data::ResultCode(data::ResultCode::Numeric::kInstallFailed, result_code_err_str), - "Installation failed on at least one of ECUs"); - raw_installation_report = "Installation failed on at least one of ECUs"; + "Installation failed on one or more ECUs"); + raw_ir = "Installation failed on one or more ECUs"; break; } @@ -401,54 +526,66 @@ void SotaUptaneClient::computeDeviceInstallationResult(data::InstallationResult *result = device_installation_result; } - // TODO: think of exception handling, the SQLite related code can throw exceptions - storage->storeDeviceInstallationResult(device_installation_result, raw_installation_report, correlation_id); + if (raw_installation_report != nullptr) { + *raw_installation_report = raw_ir; + } } -bool SotaUptaneClient::getNewTargets(std::vector *new_targets, unsigned int *ecus_count) { - std::vector targets = director_repo.getTargets(); - Uptane::EcuSerial primary_ecu_serial = uptane_manifest.getPrimaryEcuSerial(); +void SotaUptaneClient::getNewTargets(std::vector *new_targets, unsigned int 
*ecus_count) { + const std::vector targets = director_repo.getTargets().targets; + const Uptane::EcuSerial primary_ecu_serial = primaryEcuSerial(); if (ecus_count != nullptr) { *ecus_count = 0; } for (const Uptane::Target &target : targets) { bool is_new = false; for (const auto &ecu : target.ecus()) { - Uptane::EcuSerial ecu_serial = ecu.first; - Uptane::HardwareIdentifier hw_id = ecu.second; - - auto hwid_it = hw_ids.find(ecu_serial); - if (hwid_it == hw_ids.end()) { - LOG_ERROR << "Unknown ECU ID in director targets metadata: " << ecu_serial.ToString(); - last_exception = Uptane::BadEcuId(target.filename()); - return false; + const Uptane::EcuSerial ecu_serial = ecu.first; + const Uptane::HardwareIdentifier hw_id = ecu.second; + + // 5.4.4.6.8. If checking Targets metadata from the Director repository, + // and the ECU performing the verification is the Primary ECU, check that + // all listed ECU identifiers correspond to ECUs that are actually present + // in the vehicle. + const auto hw_id_known = getEcuHwId(ecu_serial); + if (!hw_id_known) { + // This is triggered if a Secondary is removed after an update was + // installed on it because of the empty targets optimization. + // Thankfully if the Director issues new Targets, it fixes itself. 
+ LOG_ERROR << "Unknown ECU ID in Director Targets metadata: " << ecu_serial; + throw Uptane::BadEcuId(target.filename()); } - if (hwid_it->second != hw_id) { - LOG_ERROR << "Wrong hardware identifier for ECU " << ecu_serial.ToString(); - last_exception = Uptane::BadHardwareId(target.filename()); - return false; + if (*hw_id_known != hw_id) { + LOG_ERROR << "Wrong hardware identifier for ECU " << ecu_serial; + throw Uptane::BadHardwareId(target.filename()); } boost::optional current_version; if (!storage->loadInstalledVersions(ecu_serial.ToString(), ¤t_version, nullptr)) { - LOG_WARNING << "Could not load currently installed version for ECU ID: " << ecu_serial.ToString(); + LOG_WARNING << "Could not load currently installed version for ECU ID: " << ecu_serial; break; } if (!current_version) { - LOG_WARNING << "Current version for ECU ID: " << ecu_serial.ToString() << " is unknown"; + LOG_WARNING << "Current version for ECU ID: " << ecu_serial << " is unknown"; is_new = true; - } else if (current_version->filename() != target.filename()) { + } else if (current_version->MatchTarget(target)) { + // Do nothing; target is already installed. + } else if (current_version->filename() == target.filename()) { + LOG_ERROR << "Director Target filename matches currently installed version, but content differs!"; + throw Uptane::TargetContentMismatch(target.filename()); + } else { is_new = true; } + // Reject non-OSTree updates for the Primary if using OSTree. + // TODO(OTA-4939): Unify this with the check in + // PackageManagerFake::fetchTarget() and make it more generic. 
if (primary_ecu_serial == ecu_serial) { - if (!target.IsOstree() && - (config.pacman.type == PackageManager::kOstree || config.pacman.type == PackageManager::kOstreeDockerApp)) { + if (!target.IsOstree() && config.pacman.type == PACKAGE_MANAGER_OSTREE) { LOG_ERROR << "Cannot install a non-OSTree package on an OSTree system"; - last_exception = Uptane::InvalidTarget(target.filename()); - return false; + throw Uptane::InvalidTarget(target.filename()); } } @@ -461,9 +598,9 @@ bool SotaUptaneClient::getNewTargets(std::vector *new_targets, u new_targets->push_back(target); } } - return true; } +// NOLINTNEXTLINE(misc-no-recursion) std::unique_ptr SotaUptaneClient::findTargetHelper(const Uptane::Targets &cur_targets, const Uptane::Target &queried_target, const int level, const bool terminating, @@ -499,16 +636,17 @@ std::unique_ptr SotaUptaneClient::findTargetHelper(const Uptane: // Target name matches one of the patterns auto delegation = - Uptane::getTrustedDelegation(delegate_role, cur_targets, images_repo, *storage, *uptane_fetcher, offline); + Uptane::getTrustedDelegation(delegate_role, cur_targets, image_repo, *storage, *uptane_fetcher, offline); if (delegation.isExpired(TimeStamp::Now())) { continue; } auto is_terminating = cur_targets.terminating_role_.find(delegate_role); if (is_terminating == cur_targets.terminating_role_.end()) { - throw Uptane::Exception("images", "Inconsistent delegations"); + throw Uptane::Exception("image", "Inconsistent delegations"); } + // NOLINTNEXTLINE(misc-no-recursion) auto found_target = findTargetHelper(delegation, queried_target, level + 1, is_terminating->second, offline); if (found_target != nullptr) { return found_target; @@ -520,7 +658,7 @@ std::unique_ptr SotaUptaneClient::findTargetHelper(const Uptane: std::unique_ptr SotaUptaneClient::findTargetInDelegationTree(const Uptane::Target &target, const bool offline) { - auto toplevel_targets = images_repo.getTargets(); + auto toplevel_targets = image_repo.getTargets(); if 
(toplevel_targets == nullptr) { return std::unique_ptr(nullptr); } @@ -530,20 +668,30 @@ std::unique_ptr SotaUptaneClient::findTargetInDelegationTree(con result::Download SotaUptaneClient::downloadImages(const std::vector &targets, const api::FlowControlToken *token) { + requiresAlreadyProvisioned(); // Uptane step 4 - download all the images and verify them against the metadata (for OSTree - pull without // deploying) std::lock_guard guard(download_mutex); result::Download result; std::vector downloaded_targets; - result::UpdateStatus update_status = checkUpdatesOffline(targets); + result::UpdateStatus update_status; + try { + update_status = checkUpdatesOffline(targets); + } catch (const std::exception &e) { + last_exception = std::current_exception(); + update_status = result::UpdateStatus::kError; + } + + if (update_status == result::UpdateStatus::kNoUpdatesAvailable) { + result = result::Download({}, result::DownloadStatus::kNothingToDownload, ""); + } else if (update_status == result::UpdateStatus::kError) { + result = result::Download(downloaded_targets, result::DownloadStatus::kError, "Error rechecking stored metadata."); + storeInstallationFailure( + data::InstallationResult(data::ResultCode::Numeric::kInternalError, "Error rechecking stored metadata.")); + } + if (update_status != result::UpdateStatus::kUpdatesAvailable) { - if (update_status == result::UpdateStatus::kNoUpdatesAvailable) { - result = result::Download({}, result::DownloadStatus::kNothingToDownload, ""); - } else { - result = - result::Download(downloaded_targets, result::DownloadStatus::kError, "Error rechecking stored metadata."); - } sendEvent(result); return result; } @@ -558,20 +706,15 @@ result::Download SotaUptaneClient::downloadImages(const std::vectorstoreDeviceInstallationResult(device_installation_result, "", correlation_id); - // Fix for OTA-2587, listen to backend again after end of install. 
- director_repo.dropTargets(*storage); + storeInstallationFailure( + data::InstallationResult(data::ResultCode::Numeric::kDownloadFailed, "Target download failed.")); } sendEvent(result); @@ -590,36 +733,56 @@ void SotaUptaneClient::reportResume() { std::pair SotaUptaneClient::downloadImage(const Uptane::Target &target, const api::FlowControlToken *token) { - // TODO: support downloading encrypted targets from director - const std::string &correlation_id = director_repo.getCorrelationId(); - // send an event for all ecus that are touched by this target + // send an event for all ECUs that are touched by this target for (const auto &ecu : target.ecus()) { report_queue->enqueue(std_::make_unique(ecu.first, correlation_id)); } - KeyManager keys(storage, config.keymanagerConfig()); - keys.loadKeys(); - auto prog_cb = [this](const Uptane::Target &t, const std::string description, unsigned int progress) { - report_progress_cb(events_channel.get(), t, description, progress); - }; + // Note: handle exceptions from here so that we can send reports and + // DownloadTargetComplete events in all cases. We might want to move these to + // downloadImages but aktualizr-lite currently calls this method directly. 
bool success = false; - const int max_tries = 3; - int tries = 0; - std::chrono::milliseconds wait(500); - - for (; tries < max_tries; tries++) { - success = package_manager_->fetchTarget(target, *uptane_fetcher, keys, prog_cb, token); - if (success) { - break; - } else if (tries < max_tries - 1) { - std::this_thread::sleep_for(wait); - wait *= 2; + try { + KeyManager keys(storage, config.keymanagerConfig()); + keys.loadKeys(); + auto prog_cb = [this](const Uptane::Target &t, const std::string &description, unsigned int progress) { + report_progress_cb(events_channel.get(), t, description, progress); + }; + + const Uptane::EcuSerial &primary_ecu_serial = primaryEcuSerial(); + + if (target.IsForEcu(primary_ecu_serial) || !target.IsOstree()) { + const int max_tries = 3; + int tries = 0; + std::chrono::milliseconds wait(500); + + for (; tries < max_tries; tries++) { + success = package_manager_->fetchTarget(target, *uptane_fetcher, keys, prog_cb, token); + // Skip trying to fetch the 'target' if control flow token transaction + // was set to the 'abort' or 'pause' state, see the CommandQueue and FlowControlToken. + if (success || (token != nullptr && !token->canContinue(false))) { + break; + } else if (tries < max_tries - 1) { + std::this_thread::sleep_for(wait); + wait *= 2; + } + } + if (!success) { + LOG_ERROR << "Download unsuccessful after " << tries << " attempts."; + // TODO: Throw more meaningful exceptions. Failure can be caused by more + // than just a hash mismatch. However, this is purely internal and + // mostly just relevant for testing. 
+ throw Uptane::TargetHashMismatch(target.filename()); + } + } else { + // we emulate successful download in case of the Secondary OSTree update + success = true; } - } - if (!success) { - LOG_ERROR << "Download unsuccessful after " << tries << " attempts."; + } catch (const std::exception &e) { + LOG_ERROR << "Error downloading image: " << e.what(); + last_exception = std::current_exception(); } // send this asynchronously before `sendEvent`, so that the report timestamp @@ -632,33 +795,21 @@ std::pair SotaUptaneClient::downloadImage(const Uptane::Ta return {success, target}; } -bool SotaUptaneClient::uptaneIteration(std::vector *targets, unsigned int *ecus_count) { - if (!updateDirectorMeta()) { - LOG_ERROR << "Failed to update director metadata: " << last_exception.what(); - return false; - } +void SotaUptaneClient::uptaneIteration(std::vector *targets, unsigned int *ecus_count) { + updateDirectorMeta(); + std::vector tmp_targets; unsigned int ecus; - if (!getNewTargets(&tmp_targets, &ecus)) { - LOG_ERROR << "Inconsistency between director metadata and existent ECUs"; - return false; - } - - if (tmp_targets.empty()) { - if (targets != nullptr) { - *targets = std::move(tmp_targets); - } - if (ecus_count != nullptr) { - *ecus_count = ecus; - } - return true; + try { + getNewTargets(&tmp_targets, &ecus); + } catch (const std::exception &e) { + LOG_ERROR << "Inconsistency between Director metadata and available ECUs: " << e.what(); + throw; } - LOG_INFO << "got new updates"; - - if (!updateImagesMeta()) { - LOG_ERROR << "Failed to update images metadata: " << last_exception.what(); - return false; + if (!tmp_targets.empty()) { + LOG_INFO << "New updates found in Director metadata. 
Checking Image repo metadata..."; + updateImageMeta(); } if (targets != nullptr) { @@ -667,55 +818,58 @@ bool SotaUptaneClient::uptaneIteration(std::vector *targets, uns if (ecus_count != nullptr) { *ecus_count = ecus; } - return true; } -bool SotaUptaneClient::uptaneOfflineIteration(std::vector *targets, unsigned int *ecus_count) { - if (!checkDirectorMetaOffline()) { - LOG_ERROR << "Failed to check director metadata: " << last_exception.what(); - return false; - } +void SotaUptaneClient::uptaneOfflineIteration(std::vector *targets, unsigned int *ecus_count) { + checkDirectorMetaOffline(); + std::vector tmp_targets; unsigned int ecus; - if (!getNewTargets(&tmp_targets, &ecus)) { - LOG_ERROR << "Inconsistency between director metadata and existent ECUs"; - return false; + try { + getNewTargets(&tmp_targets, &ecus); + } catch (const std::exception &e) { + LOG_ERROR << "Inconsistency between Director metadata and available ECUs: " << e.what(); + throw; } - if (tmp_targets.empty()) { - *targets = std::move(tmp_targets); - if (ecus_count != nullptr) { - *ecus_count = ecus; - } - return true; + if (!tmp_targets.empty()) { + LOG_DEBUG << "New updates found in stored Director metadata. 
Checking stored Image repo metadata..."; + checkImageMetaOffline(); } - if (!checkImagesMetaOffline()) { - LOG_ERROR << "Failed to check images metadata: " << last_exception.what(); - return false; + if (targets != nullptr) { + *targets = std::move(tmp_targets); } - - *targets = std::move(tmp_targets); if (ecus_count != nullptr) { *ecus_count = ecus; } - return true; } void SotaUptaneClient::sendDeviceData() { + requiresProvision(); + reportHwInfo(); reportInstalledPackages(); reportNetworkInfo(); - putManifestSimple(); + reportAktualizrConfiguration(); sendEvent(); } result::UpdateCheck SotaUptaneClient::fetchMeta() { + requiresProvision(); + result::UpdateCheck result; reportNetworkInfo(); if (hasPendingUpdates()) { + // if there are some pending updates check if the Secondaries' pending updates have been applied + LOG_INFO << "The current update is pending. Check if pending ECUs has been already updated"; + checkAndUpdatePendingSecondaries(); + } + + if (hasPendingUpdates()) { + // if there are still some pending updates just return, don't check for new updates // no need in update checking if there are some pending updates LOG_INFO << "An update is pending. 
Skipping check for update until installation is complete."; return result::UpdateCheck({}, 0, result::UpdateStatus::kError, Json::nullValue, @@ -737,47 +891,61 @@ result::UpdateCheck SotaUptaneClient::checkUpdates() { std::vector updates; unsigned int ecus_count = 0; - if (!uptaneIteration(&updates, &ecus_count)) { + try { + uptaneIteration(&updates, &ecus_count); + } catch (const std::exception &e) { + last_exception = std::current_exception(); result = result::UpdateCheck({}, 0, result::UpdateStatus::kError, Json::nullValue, "Could not update metadata."); return result; } std::string director_targets; - storage->loadNonRoot(&director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); + if (!storage->loadNonRoot(&director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())) { + result = result::UpdateCheck({}, 0, result::UpdateStatus::kError, Json::nullValue, "Could not update metadata."); + return result; + } if (updates.empty()) { LOG_DEBUG << "No new updates found in Uptane metadata."; - result = result::UpdateCheck(updates, ecus_count, result::UpdateStatus::kNoUpdatesAvailable, - Utils::parseJSON(director_targets), ""); + result = + result::UpdateCheck({}, 0, result::UpdateStatus::kNoUpdatesAvailable, Utils::parseJSON(director_targets), ""); return result; } - // For every target in the Director Targets metadata, walk the delegation - // tree (if necessary) and find a matching target in the Images repo - // metadata. - for (auto &target : updates) { - auto images_target = findTargetInDelegationTree(target, false); - if (images_target == nullptr) { - // TODO: Could also be a missing target or delegation expiration. 
- last_exception = Uptane::TargetMismatch(target.filename()); - LOG_ERROR << "No matching target in images targets metadata for " << target; - result = result::UpdateCheck(updates, ecus_count, result::UpdateStatus::kError, - Utils::parseJSON(director_targets), "Target mismatch."); - return result; - } - // If the URL from the Director is unset, but the URL from the Images repo - // is set, use that. - if (target.uri().empty() && !images_target->uri().empty()) { - target.setUri(images_target->uri()); + // 5.4.4.2.10.: Verify that Targets metadata from the Director and Image + // repositories match. A Primary ECU MUST perform this check on metadata for + // all images listed in the Targets metadata file from the Director + // repository. + try { + for (auto &target : updates) { + auto image_target = findTargetInDelegationTree(target, false); + if (image_target == nullptr) { + // TODO: Could also be a missing target or delegation expiration. + LOG_ERROR << "No matching target in Image repo Targets metadata for " << target; + throw Uptane::TargetMismatch(target.filename()); + } + // If the URL from the Director is unset, but the URL from the Image repo + // is set, use that. 
+ if (target.uri().empty() && !image_target->uri().empty()) { + target.setUri(image_target->uri()); + } } + } catch (const std::exception &e) { + last_exception = std::current_exception(); + LOG_ERROR << e.what(); + result = result::UpdateCheck({}, 0, result::UpdateStatus::kError, Utils::parseJSON(director_targets), + "Target mismatch."); + storeInstallationFailure( + data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, "Metadata verification failed.")); + return result; } result = result::UpdateCheck(updates, ecus_count, result::UpdateStatus::kUpdatesAvailable, Utils::parseJSON(director_targets), ""); if (updates.size() == 1) { - LOG_INFO << "1 new update found in Uptane metadata."; + LOG_INFO << "1 new update found in both Director and Image repo metadata."; } else { - LOG_INFO << updates.size() << " new updates found in Uptane metadata."; + LOG_INFO << updates.size() << " new updates found in both Director and Image repo metadata."; } return result; } @@ -796,31 +964,34 @@ result::UpdateStatus SotaUptaneClient::checkUpdatesOffline(const std::vector director_targets; unsigned int ecus_count = 0; - if (!uptaneOfflineIteration(&director_targets, &ecus_count)) { - LOG_ERROR << "Invalid Uptane metadata in storage."; - return result::UpdateStatus::kError; + try { + uptaneOfflineIteration(&director_targets, &ecus_count); + } catch (const std::exception &e) { + LOG_ERROR << "Aborting; invalid Uptane metadata in storage."; + throw; } if (director_targets.empty()) { - LOG_ERROR << "No new updates found in Uptane metadata, but expected " << targets.size() << "."; + LOG_ERROR << "No new updates found while rechecking stored Director Targets metadata, but " << targets.size() + << " target(s) were requested."; return result::UpdateStatus::kNoUpdatesAvailable; } // For every target in the Director Targets metadata, walk the delegation - // tree (if necessary) and find a matching target in the Images repo + // tree (if necessary) and find a matching target 
in the Image repo // metadata. for (const auto &target : targets) { TargetCompare target_comp(target); const auto it = std::find_if(director_targets.cbegin(), director_targets.cend(), target_comp); if (it == director_targets.cend()) { - LOG_ERROR << "No matching target in director targets metadata for " << target; - return result::UpdateStatus::kError; + LOG_ERROR << "No matching target in Director Targets metadata for " << target; + throw Uptane::Exception(Uptane::RepositoryType::DIRECTOR, "No matching target in Director Targets metadata"); } - const auto images_target = findTargetInDelegationTree(target, true); - if (images_target == nullptr) { - LOG_ERROR << "No matching target in images targets metadata for " << target; - return result::UpdateStatus::kError; + const auto image_target = findTargetInDelegationTree(target, true); + if (image_target == nullptr) { + LOG_ERROR << "No matching target in Image repo Targets metadata for " << target; + throw Uptane::Exception(Uptane::RepositoryType::IMAGE, "No matching target in Director Targets metadata"); } } @@ -828,61 +999,91 @@ result::UpdateStatus SotaUptaneClient::checkUpdatesOffline(const std::vector &updates) { - result::Install result; + requiresAlreadyProvisioned(); const std::string &correlation_id = director_repo.getCorrelationId(); - // clear all old results first - storage->clearInstallationResults(); + // put most of the logic in a lambda so that we can take care of common + // post-operations + result::Install r; + std::string raw_report; - // Recheck the Uptane metadata and make sure the requested updates are - // consistent with the stored metadata. 
- result::UpdateStatus update_status = checkUpdatesOffline(updates); - if (update_status != result::UpdateStatus::kUpdatesAvailable) { - if (update_status == result::UpdateStatus::kNoUpdatesAvailable) { - result.dev_report = {false, data::ResultCode::Numeric::kAlreadyProcessed, ""}; - } else { - result.dev_report = {false, data::ResultCode::Numeric::kInternalError, ""}; + std::tie(r, raw_report) = [this, &updates, &correlation_id]() -> std::tuple { + result::Install result; + + // Recheck the Uptane metadata and make sure the requested updates are + // consistent with the stored metadata. + result::UpdateStatus update_status; + try { + update_status = checkUpdatesOffline(updates); + } catch (const std::exception &e) { + last_exception = std::current_exception(); + update_status = result::UpdateStatus::kError; } - storage->storeDeviceInstallationResult(result.dev_report, "Stored Uptane metadata is invalid", correlation_id); - sendEvent(result); - return result; - } - // Recheck the downloaded update hashes. - for (const auto &update : updates) { - if (package_manager_->verifyTarget(update) != TargetStatus::kGood) { - result.dev_report = {false, data::ResultCode::Numeric::kInternalError, ""}; - storage->storeDeviceInstallationResult(result.dev_report, "Downloaded target is invalid", correlation_id); - sendEvent(result); - return result; + if (update_status != result::UpdateStatus::kUpdatesAvailable) { + if (update_status == result::UpdateStatus::kNoUpdatesAvailable) { + result.dev_report = {false, data::ResultCode::Numeric::kAlreadyProcessed, ""}; + } else { + result.dev_report = {false, data::ResultCode::Numeric::kInternalError, ""}; + } + return std::make_tuple(result, "Stored Uptane metadata is invalid"); + } + + Uptane::EcuSerial primary_ecu_serial = primaryEcuSerial(); + // Recheck the downloaded update hashes. 
+    for (const auto &update : updates) {
+      if (update.IsForEcu(primary_ecu_serial) || !update.IsOstree()) {
+        // download binary images for any target, for both Primary and Secondary
+        // download an OSTree revision just for Primary, Secondary will do it by itself
+        // Primary cannot verify downloaded OSTree targets for Secondaries,
+        // Downloading of Secondary's OSTree repo revision to the Primary's can fail
+        // if they differ significantly as OSTree has a certain cap/limit of the diff it pulls
+        if (package_manager_->verifyTarget(update) != TargetStatus::kGood) {
+          result.dev_report = {false, data::ResultCode::Numeric::kInternalError, ""};
+          return std::make_tuple(result, "Downloaded target is invalid");
+        }
+      }
+    }
+
+    // wait some time for Secondaries to come up
+    // note: this fails after a timeout but will be retried at the next install
+    // phase if the targets have not been changed. This is done to avoid being
+    // stuck in an unrecoverable state here
+    if (!waitSecondariesReachable(updates)) {
+      result.dev_report = {false, data::ResultCode::Numeric::kInternalError, "Unreachable Secondary"};
+      return std::make_tuple(result, "Secondaries were not available");
     }
-  }
 
-  // Uptane step 5 (send time to all ECUs) is not implemented yet.
-  Uptane::EcuSerial primary_ecu_serial = uptane_manifest.getPrimaryEcuSerial();
-  std::vector primary_updates = findForEcu(updates, primary_ecu_serial);
-  // 6 - send metadata to all the ECUs
-  sendMetadataToEcus(updates);
+  // Uptane step 5 (send time to all ECUs) is not implemented yet. 
+ std::vector primary_updates = findForEcu(updates, primary_ecu_serial); - // 7 - send images to ECUs (deploy for OSTree) - if (primary_updates.size() != 0u) { - // assuming one OSTree OS per primary => there can be only one OSTree update - Uptane::Target primary_update = primary_updates[0]; - primary_update.setCorrelationId(correlation_id); + // 6 - send metadata to all the ECUs + data::InstallationResult metadata_res; + std::string rr; + sendMetadataToEcus(updates, &metadata_res, &rr); + if (!metadata_res.isSuccess()) { + result.dev_report = std::move(metadata_res); + return std::make_tuple(result, rr); + } + + // 7 - send images to ECUs (deploy for OSTree) + if (!primary_updates.empty()) { + // assuming one OSTree OS per Primary => there can be only one OSTree update + Uptane::Target primary_update = primary_updates[0]; + primary_update.setCorrelationId(correlation_id); - report_queue->enqueue(std_::make_unique(primary_ecu_serial, correlation_id)); - sendEvent(primary_ecu_serial); + report_queue->enqueue(std_::make_unique(primary_ecu_serial, correlation_id)); + sendEvent(primary_ecu_serial); - data::InstallationResult install_res; - if (!isInstalledOnPrimary(primary_update)) { + data::InstallationResult install_res; // notify the bootloader before installation happens, because installation is not atomic and // a false notification doesn't hurt when rollbacks are implemented - bootloader->updateNotify(); + package_manager_->updateNotify(); install_res = PackageInstallSetResult(primary_update); if (install_res.result_code.num_code == data::ResultCode::Numeric::kNeedCompletion) { // update needs a reboot, send distinct EcuInstallationApplied event report_queue->enqueue(std_::make_unique(primary_ecu_serial, correlation_id)); - sendEvent(primary_ecu_serial, true); // TODO: distinguish from success here? 
+ sendEvent(primary_ecu_serial, true); } else if (install_res.result_code.num_code == data::ResultCode::Numeric::kOk) { storage->saveEcuInstallationResult(primary_ecu_serial, install_res); report_queue->enqueue( @@ -895,36 +1096,29 @@ result::Install SotaUptaneClient::uptaneInstall(const std::vector(primary_ecu_serial, correlation_id, false)); sendEvent(primary_ecu_serial, false); } + result.ecu_reports.emplace(result.ecu_reports.begin(), primary_update, primary_ecu_serial, install_res); } else { - install_res = data::InstallationResult(data::ResultCode::Numeric::kAlreadyProcessed, "Package already installed"); - storage->saveEcuInstallationResult(primary_ecu_serial, install_res); - // TODO: distinguish this case from regular failure for local and remote - // event reporting - report_queue->enqueue(std_::make_unique(uptane_manifest.getPrimaryEcuSerial(), - correlation_id, false)); - sendEvent(uptane_manifest.getPrimaryEcuSerial(), false); + LOG_INFO << "No update to install on Primary"; } - result.ecu_reports.emplace(result.ecu_reports.begin(), primary_update, uptane_manifest.getPrimaryEcuSerial(), - install_res); - // TODO: other updates for primary - } else { - LOG_INFO << "No update to install on primary"; - } - auto sec_reports = sendImagesToEcus(updates); - result.ecu_reports.insert(result.ecu_reports.end(), sec_reports.begin(), sec_reports.end()); - computeDeviceInstallationResult(&result.dev_report, correlation_id); - sendEvent(result); + auto sec_reports = sendImagesToEcus(updates); + result.ecu_reports.insert(result.ecu_reports.end(), sec_reports.begin(), sec_reports.end()); + computeDeviceInstallationResult(&result.dev_report, &rr); - if (!(result.dev_report.isSuccess() || result.dev_report.needCompletion())) { - director_repo.dropTargets(*storage); // fix for OTA-2587, listen to backend again after end of install - } + return std::make_tuple(result, rr); + }(); - return result; + storage->storeDeviceInstallationResult(r.dev_report, raw_report, 
correlation_id); + + sendEvent(r); + + return r; } result::CampaignCheck SotaUptaneClient::campaignCheck() { - auto campaigns = campaign::fetchAvailableCampaigns(*http, config.tls.server); + requiresProvision(); + + auto campaigns = campaign::Campaign::fetchAvailableCampaigns(*http, config.tls.server); for (const auto &c : campaigns) { LOG_INFO << "Campaign: " << c.name; LOG_INFO << "Campaign id: " << c.id; @@ -938,23 +1132,36 @@ result::CampaignCheck SotaUptaneClient::campaignCheck() { } void SotaUptaneClient::campaignAccept(const std::string &campaign_id) { + requiresAlreadyProvisioned(); + sendEvent(); report_queue->enqueue(std_::make_unique(campaign_id)); } void SotaUptaneClient::campaignDecline(const std::string &campaign_id) { + requiresAlreadyProvisioned(); + sendEvent(); report_queue->enqueue(std_::make_unique(campaign_id)); } void SotaUptaneClient::campaignPostpone(const std::string &campaign_id) { + requiresAlreadyProvisioned(); + sendEvent(); report_queue->enqueue(std_::make_unique(campaign_id)); } bool SotaUptaneClient::isInstallCompletionRequired() { - bool force_install_completion = (hasPendingUpdates() && config.uptane.force_install_completion); - return force_install_completion; + std::vector> pending_ecus; + storage->getPendingEcus(&pending_ecus); + auto primary_ecu_serial = provisioner_.PrimaryEcuSerial(); + bool pending_for_ecu = std::find_if(pending_ecus.begin(), pending_ecus.end(), + [&primary_ecu_serial](const std::pair &ecu) -> bool { + return ecu.first == primary_ecu_serial; + }) != pending_ecus.end(); + + return pending_for_ecu && config.uptane.force_install_completion; } void SotaUptaneClient::completeInstall() { @@ -972,15 +1179,23 @@ bool SotaUptaneClient::putManifestSimple(const Json::Value &custom) { return false; } + static bool connected = true; auto manifest = AssembleManifest(); - if (custom != Json::nullValue) { + if (!custom.empty()) { manifest["custom"] = custom; } - auto signed_manifest = uptane_manifest.signManifest(manifest); 
+ auto signed_manifest = uptane_manifest->sign(manifest); HttpResponse response = http->put(config.uptane.director_server + "/manifest", signed_manifest); if (response.isOk()) { + if (!connected) { + LOG_INFO << "Connectivity is restored."; + } + connected = true; storage->clearInstallationResults(); + return true; + } else { + connected = false; } LOG_WARNING << "Put manifest request failed: " << response.getStatusStr(); @@ -988,150 +1203,215 @@ bool SotaUptaneClient::putManifestSimple(const Json::Value &custom) { } bool SotaUptaneClient::putManifest(const Json::Value &custom) { + requiresProvision(); + bool success = putManifestSimple(custom); sendEvent(success); return success; } -// Check stored secondaries list against secondaries known to aktualizr. -void SotaUptaneClient::verifySecondaries() { - storage->clearMisconfiguredEcus(); - EcuSerials serials; - if (!storage->loadEcuSerials(&serials) || serials.empty()) { - LOG_ERROR << "No ECU serials found in storage!"; - return; - } +bool SotaUptaneClient::waitSecondariesReachable(const std::vector &updates) { + std::map targeted_secondaries; + const Uptane::EcuSerial &primary_ecu_serial = primaryEcuSerial(); + for (const auto &t : updates) { + for (const auto &ecu : t.ecus()) { + if (ecu.first == primary_ecu_serial) { + continue; + } + auto f = secondaries.find(ecu.first); + if (f == secondaries.end()) { + LOG_ERROR << "Target " << t << " has an unknown ECU serial."; + continue; + } - std::vector misconfigured_ecus; - std::vector found(serials.size(), false); - SerialCompare primary_comp(uptane_manifest.getPrimaryEcuSerial()); - EcuSerials::const_iterator store_it; - store_it = std::find_if(serials.cbegin(), serials.cend(), primary_comp); - if (store_it == serials.cend()) { - LOG_ERROR << "Primary ECU serial " << uptane_manifest.getPrimaryEcuSerial() << " not found in storage!"; - misconfigured_ecus.emplace_back(uptane_manifest.getPrimaryEcuSerial(), Uptane::HardwareIdentifier(""), - EcuState::kOld); - } else { - 
found[static_cast(std::distance(serials.cbegin(), store_it))] = true; - } - - std::map>::const_iterator it; - for (it = secondaries.cbegin(); it != secondaries.cend(); ++it) { - SerialCompare secondary_comp(it->second->getSerial()); - store_it = std::find_if(serials.cbegin(), serials.cend(), secondary_comp); - if (store_it == serials.cend()) { - LOG_ERROR << "Secondary ECU serial " << it->second->getSerial() << " (hardware ID " << it->second->getHwId() - << ") not found in storage!"; - misconfigured_ecus.emplace_back(it->second->getSerial(), it->second->getHwId(), EcuState::kNotRegistered); - } else if (found[static_cast(std::distance(serials.cbegin(), store_it))]) { - LOG_ERROR << "Secondary ECU serial " << it->second->getSerial() << " (hardware ID " << it->second->getHwId() - << ") has a duplicate entry in storage!"; - } else { - found[static_cast(std::distance(serials.cbegin(), store_it))] = true; + targeted_secondaries[ecu.first] = f->second.get(); } } - std::vector::iterator found_it; - for (found_it = found.begin(); found_it != found.end(); ++found_it) { - if (!*found_it) { - auto not_registered = serials[static_cast(std::distance(found.begin(), found_it))]; - LOG_WARNING << "ECU serial " << not_registered.first << " in storage was not reported to aktualizr!"; - misconfigured_ecus.emplace_back(not_registered.first, not_registered.second, EcuState::kOld); - } + if (targeted_secondaries.empty()) { + return true; } - storage->storeMisconfiguredEcus(misconfigured_ecus); -} + LOG_INFO << "Waiting for Secondaries to connect to start installation..."; -void SotaUptaneClient::rotateSecondaryRoot(Uptane::RepositoryType repo, Uptane::SecondaryInterface &secondary) { - std::string latest_root; + auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(config.uptane.secondary_preinstall_wait_sec); + while (std::chrono::system_clock::now() <= deadline) { + if (targeted_secondaries.empty()) { + return true; + } - if (!storage->loadLatestRoot(&latest_root, 
repo)) { - LOG_ERROR << "No root metadata to send"; - return; + for (auto sec_it = targeted_secondaries.begin(); sec_it != targeted_secondaries.end();) { + bool connected = false; + try { + connected = sec_it->second->ping(); + } catch (const std::exception &ex) { + LOG_DEBUG << "Failed to ping Secondary with serial " << sec_it->first << ": " << ex.what(); + } + if (connected) { + sec_it = targeted_secondaries.erase(sec_it); + } else { + sec_it++; + } + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + + for (const auto &sec : targeted_secondaries) { + LOG_ERROR << "Secondary with serial " << sec.second->getSerial() << " failed to connect!"; } - int last_root_version = Uptane::extractVersionUntrusted(latest_root); + return false; +} - int sec_root_version = secondary.getRootVersion((repo == Uptane::RepositoryType::Director())); - if (sec_root_version >= 0) { - for (int v = sec_root_version + 1; v <= last_root_version; v++) { +void SotaUptaneClient::storeInstallationFailure(const data::InstallationResult &result) { + // Store installation report to inform Director of the update failure before + // we actually got to the install step. + const std::string &correlation_id = director_repo.getCorrelationId(); + storage->storeDeviceInstallationResult(result, "", correlation_id); + // Fix for OTA-2587, listen to backend again after end of install. + director_repo.dropTargets(*storage); +} + +/* If the Root has been rotated more than once, we need to provide the Secondary + * with the incremental steps from what it has now. 
*/ +data::InstallationResult SotaUptaneClient::rotateSecondaryRoot(Uptane::RepositoryType repo, + SecondaryInterface &secondary) { + std::string latest_root; + if (!storage->loadLatestRoot(&latest_root, repo)) { + LOG_ERROR << "Error reading Root metadata"; + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, "Error reading Root metadata"); + } + + data::InstallationResult result{data::ResultCode::Numeric::kOk, ""}; + const int last_root_version = Uptane::extractVersionUntrusted(latest_root); + const int sec_root_version = secondary.getRootVersion((repo == Uptane::RepositoryType::Director())); + // If sec_root_version is 0, assume either the Secondary doesn't have Root + // metadata or doesn't support the Root version request. Continue on and hope + // for the best. + if (sec_root_version < 0) { + LOG_WARNING << "Secondary with serial " << secondary.getSerial() << " reported an invalid " << repo + << " repo Root version: " << sec_root_version; + result = + data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Secondary with serial " + secondary.getSerial().ToString() + " reported an invalid " + + repo.ToString() + " repo Root version: " + std::to_string(sec_root_version)); + } else if (sec_root_version > 0 && last_root_version - sec_root_version > 1) { + // Only send intermediate Roots that would otherwise be skipped. The latest + // will be sent with the complete set of the latest metadata. 
+ for (int v = sec_root_version + 1; v < last_root_version; v++) { std::string root; if (!storage->loadRoot(&root, repo, Uptane::Version(v))) { - LOG_WARNING << "Couldn't find root meta in the storage, trying remote repo"; - if (!uptane_fetcher->fetchRole(&root, Uptane::kMaxRootSize, repo, Uptane::Role::Root(), Uptane::Version(v))) { - // TODO: looks problematic, robust procedure needs to be defined - LOG_ERROR << "Root metadata could not be fetched, skipping to the next secondary"; - return; + LOG_WARNING << "Couldn't find Root metadata in the storage, trying remote repo"; + try { + uptane_fetcher->fetchRole(&root, Uptane::kMaxRootSize, repo, Uptane::Role::Root(), Uptane::Version(v)); + } catch (const std::exception &e) { + LOG_ERROR << "Root metadata could not be fetched for Secondary with serial " << secondary.getSerial() + << ", skipping to the next Secondary"; + result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Root metadata could not be fetched for Secondary with serial " + + secondary.getSerial().ToString() + ", skipping to the next Secondary"); + break; } } - if (!secondary.putRoot(root, repo == Uptane::RepositoryType::Director())) { - LOG_ERROR << "Sending metadata to " << secondary.getSerial() << " failed"; + try { + result = secondary.putRoot(root, repo == Uptane::RepositoryType::Director()); + } catch (const std::exception &ex) { + result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, ex.what()); + } + if (!result.isSuccess()) { + LOG_ERROR << "Sending Root metadata to Secondary with serial " << secondary.getSerial() + << " failed: " << result.result_code << " " << result.description; + break; } } } + return result; } -// TODO: the function can't currently return any errors. The problem of error reporting from secondaries should -// be solved on a system (backend+frontend) error. -// TODO: the function blocks until it updates all the secondaries. Consider non-blocking operation. 
-void SotaUptaneClient::sendMetadataToEcus(const std::vector &targets) { - Uptane::RawMetaPack meta; - if (!storage->loadLatestRoot(&meta.director_root, Uptane::RepositoryType::Director())) { - LOG_ERROR << "No director root metadata to send"; - return; - } - if (!storage->loadNonRoot(&meta.director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())) { - LOG_ERROR << "No director targets metadata to send"; - return; - } - if (!storage->loadLatestRoot(&meta.image_root, Uptane::RepositoryType::Image())) { - LOG_ERROR << "No images root metadata to send"; - return; - } - if (!storage->loadNonRoot(&meta.image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())) { - LOG_ERROR << "No images timestamp metadata to send"; - return; - } - if (!storage->loadNonRoot(&meta.image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())) { - LOG_ERROR << "No images snapshot metadata to send"; - return; - } - if (!storage->loadNonRoot(&meta.image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())) { - LOG_ERROR << "No images targets metadata to send"; - return; - } - - // target images should already have been downloaded to metadata_path/targets/ - for (auto targets_it = targets.cbegin(); targets_it != targets.cend(); ++targets_it) { - for (auto ecus_it = targets_it->ecus().cbegin(); ecus_it != targets_it->ecus().cend(); ++ecus_it) { - const Uptane::EcuSerial ecu_serial = ecus_it->first; - +// TODO: the function blocks until it updates all the Secondaries. Consider non-blocking operation. 
+void SotaUptaneClient::sendMetadataToEcus(const std::vector &targets, data::InstallationResult *result, + std::string *raw_installation_report) { + data::InstallationResult final_result{data::ResultCode::Numeric::kOk, ""}; + std::string result_code_err_str; + for (const auto &target : targets) { + for (const auto &ecu : target.ecus()) { + const Uptane::EcuSerial ecu_serial = ecu.first; + const Uptane::HardwareIdentifier hw_id = ecu.second; auto sec = secondaries.find(ecu_serial); - if (sec != secondaries.end()) { + if (sec == secondaries.end()) { + continue; + } + + data::InstallationResult local_result{data::ResultCode::Numeric::kOk, ""}; + do { /* Root rotation if necessary */ - rotateSecondaryRoot(Uptane::RepositoryType::Director(), *(sec->second)); - rotateSecondaryRoot(Uptane::RepositoryType::Image(), *(sec->second)); - if (!sec->second->putMetadata(meta)) { - LOG_ERROR << "Sending metadata to " << sec->second->getSerial() << " failed"; + local_result = rotateSecondaryRoot(Uptane::RepositoryType::Director(), *(sec->second)); + if (!local_result.isSuccess()) { + final_result = local_result; + break; + } + local_result = rotateSecondaryRoot(Uptane::RepositoryType::Image(), *(sec->second)); + if (!local_result.isSuccess()) { + final_result = local_result; + break; } + try { + local_result = sec->second->putMetadata(target); + } catch (const std::exception &ex) { + local_result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, ex.what()); + } + } while (false); + if (!local_result.isSuccess()) { + LOG_ERROR << "Sending metadata to " << sec->first << " failed: " << local_result.result_code << " " + << local_result.description; + const std::string ecu_code_str = hw_id.ToString() + ":" + local_result.result_code.ToString(); + result_code_err_str += (!result_code_err_str.empty() ? "|" : "") + ecu_code_str; } } } + + if (!result_code_err_str.empty()) { + // Sending the metadata to at least one of the ECUs has failed. 
+ final_result = + data::InstallationResult(data::ResultCode(data::ResultCode::Numeric::kVerificationFailed, result_code_err_str), + "Sending metadata to one or more ECUs failed"); + if (raw_installation_report != nullptr) { + *raw_installation_report = "Sending metadata to one or more ECUs failed"; + } + } + + if (result != nullptr) { + *result = final_result; + } } -std::future SotaUptaneClient::sendFirmwareAsync(Uptane::SecondaryInterface &secondary, - const std::shared_ptr &data) { - auto f = [this, &secondary, data]() { +std::future SotaUptaneClient::sendFirmwareAsync(SecondaryInterface &secondary, + const Uptane::Target &target) { + auto f = [this, &secondary, target]() { const std::string &correlation_id = director_repo.getCorrelationId(); + sendEvent(secondary.getSerial()); report_queue->enqueue(std_::make_unique(secondary.getSerial(), correlation_id)); - bool ret = secondary.sendFirmware(data); - report_queue->enqueue( - std_::make_unique(secondary.getSerial(), correlation_id, ret)); - sendEvent(secondary.getSerial(), ret); - return ret; + data::InstallationResult result; + try { + result = secondary.sendFirmware(target); + if (result.isSuccess()) { + result = secondary.install(target); + } + } catch (const std::exception &ex) { + result = data::InstallationResult(data::ResultCode::Numeric::kInternalError, ex.what()); + } + + if (result.result_code == data::ResultCode::Numeric::kNeedCompletion) { + report_queue->enqueue(std_::make_unique(secondary.getSerial(), correlation_id)); + } else { + report_queue->enqueue( + std_::make_unique(secondary.getSerial(), correlation_id, result.isSuccess())); + } + + sendEvent(secondary.getSerial(), result.isSuccess()); + return result; }; return std::async(std::launch::async, f); @@ -1139,9 +1419,9 @@ std::future SotaUptaneClient::sendFirmwareAsync(Uptane::SecondaryInterface std::vector SotaUptaneClient::sendImagesToEcus(const std::vector &targets) { std::vector reports; - std::vector>> firmwareFutures; + std::vector>> 
firmwareFutures; - Uptane::EcuSerial primary_ecu_serial = uptane_manifest.getPrimaryEcuSerial(); + const Uptane::EcuSerial &primary_ecu_serial = primaryEcuSerial(); // target images should already have been downloaded to metadata_path/targets/ for (auto targets_it = targets.cbegin(); targets_it != targets.cend(); ++targets_it) { for (auto ecus_it = targets_it->ecus().cbegin(); ecus_it != targets_it->ecus().cend(); ++ecus_it) { @@ -1153,80 +1433,114 @@ std::vector SotaUptaneClient::sendImagesToEcus(const auto f = secondaries.find(ecu_serial); if (f == secondaries.end()) { - LOG_ERROR << "Target " << *targets_it << " has unknown ECU ID"; - last_exception = Uptane::BadEcuId(targets_it->filename()); + LOG_ERROR << "Target " << *targets_it << " has an unknown ECU serial"; continue; } - Uptane::SecondaryInterface &sec = *f->second; - if (targets_it->IsOstree()) { - // empty firmware means OSTree secondaries: pack credentials instead - const std::string creds_archive = secondaryTreehubCredentials(); - if (creds_archive.empty()) { - continue; - } - firmwareFutures.emplace_back(result::Install::EcuReport(*targets_it, ecu_serial, data::InstallationResult()), - sendFirmwareAsync(sec, std::make_shared(creds_archive))); - } else { - std::stringstream sstr; - sstr << *storage->openTargetFile(*targets_it); - const std::string fw = sstr.str(); - firmwareFutures.emplace_back(result::Install::EcuReport(*targets_it, ecu_serial, data::InstallationResult()), - sendFirmwareAsync(sec, std::make_shared(fw))); - } + SecondaryInterface &sec = *f->second; + firmwareFutures.emplace_back(result::Install::EcuReport(*targets_it, ecu_serial, data::InstallationResult()), + sendFirmwareAsync(sec, *targets_it)); } } for (auto &f : firmwareFutures) { - // failure - if (fiu_fail((std::string("secondary_install_") + f.first.serial.ToString()).c_str()) != 0) { - f.first.install_res = data::InstallationResult( - data::ResultCode(data::ResultCode::Numeric::kInstallFailed, fault_injection_last_info()), 
""); - storage->saveEcuInstallationResult(f.first.serial, f.first.install_res); - reports.push_back(f.first); - continue; - } + data::InstallationResult fut_result = f.second.get(); - bool fut_result = f.second.get(); - if (fut_result) { - f.first.install_res = data::InstallationResult(data::ResultCode::Numeric::kOk, ""); - storage->saveInstalledVersion(f.first.serial.ToString(), f.first.update, InstalledVersionUpdateMode::kCurrent); - } else { - f.first.install_res = data::InstallationResult(data::ResultCode::Numeric::kInstallFailed, ""); + if (fut_result.isSuccess() || fut_result.result_code == data::ResultCode::Numeric::kNeedCompletion) { + f.first.update.setCorrelationId(director_repo.getCorrelationId()); + auto update_mode = + fut_result.isSuccess() ? InstalledVersionUpdateMode::kCurrent : InstalledVersionUpdateMode::kPending; + storage->saveInstalledVersion(f.first.serial.ToString(), f.first.update, update_mode); } + + f.first.install_res = fut_result; storage->saveEcuInstallationResult(f.first.serial, f.first.install_res); reports.push_back(f.first); } return reports; } -std::string SotaUptaneClient::secondaryTreehubCredentials() const { - if (config.tls.pkey_source != CryptoSource::kFile || config.tls.cert_source != CryptoSource::kFile || - config.tls.ca_source != CryptoSource::kFile) { - LOG_ERROR << "Cannot send OSTree update to a secondary when not using file as credential sources"; - return ""; - } - std::string ca, cert, pkey; - if (!storage->loadTlsCreds(&ca, &cert, &pkey)) { - LOG_ERROR << "Could not load tls credentials from storage"; - return ""; - } +Uptane::LazyTargetsList SotaUptaneClient::allTargets() const { + return Uptane::LazyTargetsList(image_repo, storage, uptane_fetcher); +} - std::string treehub_url = config.pacman.ostree_server; - std::map archive_map = { - {"ca.pem", ca}, {"client.pem", cert}, {"pkey.pem", pkey}, {"server.url", treehub_url}}; +void SotaUptaneClient::checkAndUpdatePendingSecondaries() { + std::vector> pending_ecus; + 
storage->getPendingEcus(&pending_ecus); - try { - std::stringstream as; - Utils::writeArchive(archive_map, as); + for (const auto &pending_ecu : pending_ecus) { + if (primaryEcuSerial() == pending_ecu.first) { + continue; + } + auto &sec = secondaries[pending_ecu.first]; + Uptane::Manifest manifest; + try { + manifest = sec->getManifest(); + } catch (const std::exception &ex) { + LOG_DEBUG << "Failed to get manifest from Secondary with serial " << pending_ecu.first << ": " << ex.what(); + continue; + } + if (manifest.empty()) { + LOG_DEBUG << "Failed to get manifest from Secondary with serial " << pending_ecu.first; + continue; + } + bool verified = false; + try { + verified = manifest.verifySignature(sec->getPublicKey()); + } catch (const std::exception &ex) { + LOG_ERROR << "Failed to get public key from Secondary with serial " << pending_ecu.first << ": " << ex.what(); + } + if (!verified) { + LOG_ERROR << "Invalid manifest or signature reported by Secondary: " + << " serial: " << pending_ecu.first << " manifest: " << manifest; + continue; + } + auto current_ecu_hash = manifest.installedImageHash(); + if (pending_ecu.second == current_ecu_hash) { + LOG_INFO << "The pending update " << current_ecu_hash << " has been installed on " << pending_ecu.first; + boost::optional pending_version; + if (storage->loadInstalledVersions(pending_ecu.first.ToString(), nullptr, &pending_version)) { + storage->saveEcuInstallationResult(pending_ecu.first, + data::InstallationResult(data::ResultCode::Numeric::kOk, "")); + + storage->saveInstalledVersion(pending_ecu.first.ToString(), *pending_version, + InstalledVersionUpdateMode::kCurrent); + + report_queue->enqueue(std_::make_unique( + pending_ecu.first, pending_version->correlation_id(), true)); + + data::InstallationResult ir; + std::string raw_report; + computeDeviceInstallationResult(&ir, &raw_report); + storage->storeDeviceInstallationResult(ir, raw_report, pending_version->correlation_id()); + } + } + } +} - return as.str(); 
- } catch (std::runtime_error &exc) { - LOG_ERROR << "Could not create credentials archive: " << exc.what(); - return ""; +boost::optional SotaUptaneClient::getEcuHwId(const Uptane::EcuSerial &serial) { + auto primary_ecu_serial = provisioner_.PrimaryEcuSerial(); + if (serial == primary_ecu_serial || serial.ToString().empty()) { + auto primary_ecu_hw_id = provisioner_.PrimaryHardwareIdentifier(); + if (primary_ecu_hw_id == Uptane::HardwareIdentifier::Unknown()) { + return boost::none; + } + return primary_ecu_hw_id; } + + const auto it = secondaries.find(serial); + if (it != secondaries.end()) { + return it->second->getHwId(); + } + + return boost::none; } -Uptane::LazyTargetsList SotaUptaneClient::allTargets() { - return Uptane::LazyTargetsList(images_repo, storage, uptane_fetcher); +std::ifstream SotaUptaneClient::openStoredTarget(const Uptane::Target &target) { + auto status = package_manager_->verifyTarget(target); + if (status == TargetStatus::kGood) { + return package_manager_->openTargetFile(target); + } else { + throw std::runtime_error("Failed to open Target"); + } } diff --git a/src/libaktualizr/primary/sotauptaneclient.h b/src/libaktualizr/primary/sotauptaneclient.h index 44dfd4631c..cc3a95b9fb 100644 --- a/src/libaktualizr/primary/sotauptaneclient.h +++ b/src/libaktualizr/primary/sotauptaneclient.h @@ -11,68 +11,90 @@ #include "gtest/gtest_prod.h" #include "json/json.h" +#include "libaktualizr/campaign.h" +#include "libaktualizr/config.h" +#include "libaktualizr/events.h" +#include "libaktualizr/packagemanagerfactory.h" +#include "libaktualizr/packagemanagerinterface.h" +#include "libaktualizr/results.h" +#include "libaktualizr/secondaryinterface.h" + #include "bootloader/bootloader.h" -#include "campaign/campaign.h" -#include "config/config.h" #include "http/httpclient.h" -#include "package_manager/packagemanagerinterface.h" -#include "primary/events.h" -#include "primary/results.h" +#include "primary/secondary_provider_builder.h" +#include 
"provisioner.h" #include "reportqueue.h" -#include "storage/invstorage.h" #include "uptane/directorrepository.h" #include "uptane/exceptions.h" #include "uptane/fetcher.h" -#include "uptane/imagesrepository.h" +#include "uptane/imagerepository.h" #include "uptane/iterator.h" -#include "uptane/secondaryinterface.h" +#include "uptane/manifest.h" +#include "uptane/tuf.h" +#include "utilities/apiqueue.h" class SotaUptaneClient { public: - static std::shared_ptr newDefaultClient( - Config &config_in, std::shared_ptr storage_in, - std::shared_ptr events_channel_in = nullptr); - - SotaUptaneClient(Config &config_in, const std::shared_ptr &storage_in, - std::shared_ptr http_client, std::shared_ptr bootloader_in, - std::shared_ptr report_queue_in, - std::shared_ptr events_channel_in = nullptr); - ~SotaUptaneClient(); + /** + * Provisioning was needed, attempted and failed. + * Thrown by requiresProvision(). + */ + class ProvisioningFailed : public std::runtime_error { + public: + explicit ProvisioningFailed() : std::runtime_error("Device was not able provision on-line") {} + }; + + /** + * Device must be provisioned before calling this operation. + * Thrown by requiresAlreadyProvisioned(). + */ + class NotProvisionedYet : public std::runtime_error { + public: + explicit NotProvisionedYet() : std::runtime_error("Device is not provisioned on-line yet") {} + }; + + SotaUptaneClient(Config &config_in, std::shared_ptr storage_in, std::shared_ptr http_in, + std::shared_ptr events_channel_in); + + SotaUptaneClient(Config &config_in, const std::shared_ptr &storage_in) + : SotaUptaneClient(config_in, storage_in, std::make_shared(), nullptr) {} void initialize(); - void addNewSecondary(const std::shared_ptr &sec); + void addSecondary(const std::shared_ptr &sec); + + /** + * Make one attempt at provisioning on-line. + * If the device is already provisioned then this is a no-op. 
+ * @return True if the device has completed on-line provisioning + */ + bool attemptProvision(); + result::Download downloadImages(const std::vector &targets, const api::FlowControlToken *token = nullptr); - std::pair downloadImage(const Uptane::Target &target, - const api::FlowControlToken *token = nullptr); + + /** See Aktualizr::SetCustomHardwareInfo(Json::Value) */ + void setCustomHardwareInfo(Json::Value hwinfo) { custom_hardware_info_ = std::move(hwinfo); } void reportPause(); void reportResume(); void sendDeviceData(); result::UpdateCheck fetchMeta(); bool putManifest(const Json::Value &custom = Json::nullValue); - result::UpdateCheck checkUpdates(); result::Install uptaneInstall(const std::vector &updates); result::CampaignCheck campaignCheck(); void campaignAccept(const std::string &campaign_id); void campaignDecline(const std::string &campaign_id); void campaignPostpone(const std::string &campaign_id); - bool hasPendingUpdates(); + bool hasPendingUpdates() const; bool isInstallCompletionRequired(); void completeInstall(); - Uptane::LazyTargetsList allTargets(); - Uptane::Target getCurrent() { return package_manager_->getCurrent(); } - - bool updateImagesMeta(); // TODO: make private once aktualizr has a proper TUF API - bool checkImagesMetaOffline(); - data::InstallationResult PackageInstall(const Uptane::Target &target); - TargetStatus VerifyTarget(const Uptane::Target &target) { return package_manager_->verifyTarget(target); } - - protected: - void addSecondary(const std::shared_ptr &sec); + std::vector getStoredTargets() const { return package_manager_->getTargetFiles(); } + void deleteStoredTarget(const Uptane::Target &target) { package_manager_->removeTargetFile(target); } + std::ifstream openStoredTarget(const Uptane::Target &target); private: FRIEND_TEST(Aktualizr, FullNoUpdates); FRIEND_TEST(Aktualizr, DeviceInstallationResult); + FRIEND_TEST(Aktualizr, DeviceInstallationResultMetadata); FRIEND_TEST(Aktualizr, FullMultipleSecondaries); 
FRIEND_TEST(Aktualizr, CheckNoUpdates); FRIEND_TEST(Aktualizr, DownloadWithUpdates); @@ -82,7 +104,6 @@ class SotaUptaneClient { FRIEND_TEST(Aktualizr, EmptyTargets); FRIEND_TEST(Aktualizr, FullOstreeUpdate); FRIEND_TEST(Aktualizr, DownloadNonOstreeBin); - FRIEND_TEST(DockerAppManager, DockerApp_Fetch); FRIEND_TEST(Uptane, AssembleManifestGood); FRIEND_TEST(Uptane, AssembleManifestBad); FRIEND_TEST(Uptane, InstallFakeGood); @@ -95,43 +116,81 @@ class SotaUptaneClient { FRIEND_TEST(UptaneCI, CheckKeys); FRIEND_TEST(UptaneKey, Check); // Note hacky name FRIEND_TEST(UptaneNetwork, DownloadFailure); + FRIEND_TEST(UptaneNetwork, LogConnectivityRestored); + FRIEND_TEST(UptaneOstree, InitialManifest); FRIEND_TEST(UptaneVector, Test); FRIEND_TEST(aktualizr_secondary_uptane, credentialsPassing); + FRIEND_TEST(MetadataExpirationTest, MetadataExpirationAfterInstallationAndBeforeApplication); + FRIEND_TEST(MetadataExpirationTest, MetadataExpirationAfterInstallationAndBeforeReboot); + FRIEND_TEST(MetadataExpirationTest, MetadataExpirationBeforeInstallation); + FRIEND_TEST(Delegation, IterateAll); friend class CheckForUpdate; // for load tests friend class ProvisionDeviceTask; // for load tests - bool uptaneIteration(std::vector *targets, unsigned int *ecus_count); - bool uptaneOfflineIteration(std::vector *targets, unsigned int *ecus_count); + /** + * This operation requires that the device is provisioned. + * Make one attempt at provisioning on-line, and if it fails throw a + * ProvisioningFailed exception. + */ + void requiresProvision(); + + /** + * This operation requires that the device is already provisioned. + * If it isn't then immediately throw a NotProvisionedYet exception without + * attempting any network communications. 
+ */ + void requiresAlreadyProvisioned(); + + data::InstallationResult PackageInstall(const Uptane::Target &target); + std::pair downloadImage(const Uptane::Target &target, + const api::FlowControlToken *token = nullptr); + void uptaneIteration(std::vector *targets, unsigned int *ecus_count); + void uptaneOfflineIteration(std::vector *targets, unsigned int *ecus_count); + result::UpdateCheck checkUpdates(); result::UpdateStatus checkUpdatesOffline(const std::vector &targets); Json::Value AssembleManifest(); - std::string secondaryTreehubCredentials() const; - Uptane::Exception getLastException() const { return last_exception; } - bool isInstalledOnPrimary(const Uptane::Target &target); + std::exception_ptr getLastException() const { return last_exception; } + Uptane::Target getCurrent() const { return package_manager_->getCurrent(); } + static std::vector findForEcu(const std::vector &targets, const Uptane::EcuSerial &ecu_id); data::InstallationResult PackageInstallSetResult(const Uptane::Target &target); void finalizeAfterReboot(); + // Part of sendDeviceData() void reportHwInfo(); + // Part of sendDeviceData() void reportInstalledPackages(); + // Called by sendDeviceData() and fetchMeta() void reportNetworkInfo(); - void verifySecondaries(); - void sendMetadataToEcus(const std::vector &targets); - std::future sendFirmwareAsync(Uptane::SecondaryInterface &secondary, const std::shared_ptr &data); + // Part of sendDeviceData() + void reportAktualizrConfiguration(); + bool waitSecondariesReachable(const std::vector &updates); + void storeInstallationFailure(const data::InstallationResult &result); + data::InstallationResult rotateSecondaryRoot(Uptane::RepositoryType repo, SecondaryInterface &secondary); + void sendMetadataToEcus(const std::vector &targets, data::InstallationResult *result, + std::string *raw_installation_report); + std::future sendFirmwareAsync(SecondaryInterface &secondary, const Uptane::Target &target); std::vector sendImagesToEcus(const 
std::vector &targets); bool putManifestSimple(const Json::Value &custom = Json::nullValue); - bool getNewTargets(std::vector *new_targets, unsigned int *ecus_count = nullptr); - void rotateSecondaryRoot(Uptane::RepositoryType repo, Uptane::SecondaryInterface &secondary); - bool updateDirectorMeta(); - bool checkDirectorMetaOffline(); - void computeDeviceInstallationResult(data::InstallationResult *result, const std::string &correlation_id); + void getNewTargets(std::vector *new_targets, unsigned int *ecus_count = nullptr); + void updateDirectorMeta(); + void updateImageMeta(); + void checkDirectorMetaOffline(); + void checkImageMetaOffline(); + + void computeDeviceInstallationResult(data::InstallationResult *result, std::string *raw_installation_report); std::unique_ptr findTargetInDelegationTree(const Uptane::Target &target, bool offline); std::unique_ptr findTargetHelper(const Uptane::Targets &cur_targets, const Uptane::Target &queried_target, int level, bool terminating, bool offline); + Uptane::LazyTargetsList allTargets() const; + void checkAndUpdatePendingSecondaries(); + Uptane::EcuSerial primaryEcuSerial() { return provisioner_.PrimaryEcuSerial(); } + boost::optional getEcuHwId(const Uptane::EcuSerial &serial); template - void sendEvent(Args &&... 
args) { + void sendEvent(Args &&...args) { std::shared_ptr event = std::make_shared(std::forward(args)...); if (events_channel) { (*events_channel)(std::move(event)); @@ -142,42 +201,22 @@ class SotaUptaneClient { Config &config; Uptane::DirectorRepository director_repo; - Uptane::ImagesRepository images_repo; - Uptane::Manifest uptane_manifest; + Uptane::ImageRepository image_repo; + Uptane::ManifestIssuer::Ptr uptane_manifest; std::shared_ptr storage; - std::shared_ptr package_manager_; std::shared_ptr http; + std::shared_ptr package_manager_; + std::shared_ptr key_manager_; std::shared_ptr uptane_fetcher; - std::shared_ptr bootloader; - std::shared_ptr report_queue; - Json::Value last_network_info_reported; - Uptane::EcuMap hw_ids; + std::unique_ptr report_queue; + std::shared_ptr secondary_provider_; std::shared_ptr events_channel; - boost::signals2::connection conn; - Uptane::Exception last_exception{"", ""}; + std::exception_ptr last_exception; // ecu_serial => secondary* - std::map> secondaries; + std::map secondaries; std::mutex download_mutex; -}; - -class TargetCompare { - public: - explicit TargetCompare(const Uptane::Target &target_in) : target(target_in) {} - bool operator()(const Uptane::Target &in) const { return (in.MatchTarget(target)); } - - private: - const Uptane::Target ⌖ -}; - -class SerialCompare { - public: - explicit SerialCompare(Uptane::EcuSerial serial_in) : serial(std::move(serial_in)) {} - bool operator()(const std::pair &in) const { - return (in.first == serial); - } - - private: - const Uptane::EcuSerial serial; + Provisioner provisioner_; + Json::Value custom_hardware_info_{Json::nullValue}; }; #endif // SOTA_UPTANE_CLIENT_H_ diff --git a/src/libaktualizr/primary/target_mismatch_test.cc b/src/libaktualizr/primary/target_mismatch_test.cc index 97f33163df..c9a70086fe 100644 --- a/src/libaktualizr/primary/target_mismatch_test.cc +++ b/src/libaktualizr/primary/target_mismatch_test.cc @@ -3,14 +3,14 @@ #include #include "httpfake.h" 
-#include "primary/aktualizr.h" +#include "libaktualizr/aktualizr.h" #include "test_utils.h" #include "uptane_test_common.h" boost::filesystem::path uptane_generator_path; /* - * Detect a mismatch in the Targets metadata from Director and Images repo. + * Detect a mismatch in the Targets metadata from Director and Image repo. */ TEST(Aktualizr, HardwareMismatch) { TemporaryDirectory temp_dir; diff --git a/src/libaktualizr/primary/uptane_key_test.cc b/src/libaktualizr/primary/uptane_key_test.cc index 1342f723d3..6c88355bbc 100644 --- a/src/libaktualizr/primary/uptane_key_test.cc +++ b/src/libaktualizr/primary/uptane_key_test.cc @@ -11,10 +11,8 @@ #include "httpfake.h" #include "logging/logging.h" #include "managedsecondary.h" -#include "primary/reportqueue.h" #include "primary/sotauptaneclient.h" #include "storage/invstorage.h" -#include "uptane/uptanerepository.h" #include "uptane_test_common.h" void initKeyTests(Config& config, Primary::VirtualSecondaryConfig& ecu_config1, @@ -23,12 +21,12 @@ void initKeyTests(Config& config, Primary::VirtualSecondaryConfig& ecu_config1, boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir / "cred.zip"); config.provision.primary_ecu_serial = "testecuserial"; config.provision.provision_path = temp_dir / "cred.zip"; - config.provision.mode = ProvisionMode::kSharedCred; + config.provision.mode = ProvisionMode::kSharedCredReuse; config.tls.server = tls_server; config.uptane.director_server = tls_server + "/director"; config.uptane.repo_server = tls_server + "/repo"; config.storage.path = temp_dir.Path(); - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; ecu_config1.partial_verifying = false; ecu_config1.full_client_dir = temp_dir.Path(); @@ -55,7 +53,9 @@ void initKeyTests(Config& config, Primary::VirtualSecondaryConfig& ecu_config1, // SotaUptaneClient. The name is carefully constructed for this purpose. 
class UptaneKey_Check_Test { public: - static void checkKeyTests(std::shared_ptr& storage, std::shared_ptr sota_client) { + static void checkKeyTests(std::shared_ptr& storage, SotaUptaneClient& sota_client) { + EXPECT_NO_THROW(sota_client.initialize()); + EXPECT_TRUE(sota_client.attemptProvision()); // Verify that TLS credentials are valid. std::string ca; std::string cert; @@ -65,7 +65,7 @@ class UptaneKey_Check_Test { EXPECT_GT(cert.size(), 0); EXPECT_GT(pkey.size(), 0); - // Verify that primary keys are valid. + // Verify that Primary keys are valid. std::string primary_public; std::string primary_private; EXPECT_TRUE(storage->loadPrimaryKeys(&primary_public, &primary_private)); @@ -81,9 +81,8 @@ class UptaneKey_Check_Test { public_keys.push_back(primary_public); private_keys.push_back(primary_private); - // Verify that each secondary has valid keys. - std::map >::iterator it; - for (it = sota_client->secondaries.begin(); it != sota_client->secondaries.end(); it++) { + // Verify that each Secondary has valid keys. 
+ for (auto it = sota_client.secondaries.begin(); it != sota_client.secondaries.end(); it++) { std::shared_ptr managed = boost::polymorphic_pointer_downcast(it->second); std::string public_key; @@ -116,11 +115,10 @@ TEST(UptaneKey, CheckAllKeys) { Primary::VirtualSecondaryConfig ecu_config2; initKeyTests(config, ecu_config1, ecu_config2, temp_dir, http->tls_server); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - sota_client->addNewSecondary(std::make_shared(ecu_config1)); - sota_client->addNewSecondary(std::make_shared(ecu_config2)); - EXPECT_NO_THROW(sota_client->initialize()); - UptaneKey_Check_Test::checkKeyTests(storage, sota_client); + auto sota_client = std_::make_unique(config, storage, http); + sota_client->addSecondary(std::make_shared(ecu_config1)); + sota_client->addSecondary(std::make_shared(ecu_config2)); + UptaneKey_Check_Test::checkKeyTests(storage, *sota_client); } /** @@ -139,11 +137,10 @@ TEST(UptaneKey, RecoverWithoutKeys) { // Initialize. { auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - sota_client->addNewSecondary(std::make_shared(ecu_config1)); - sota_client->addNewSecondary(std::make_shared(ecu_config2)); - EXPECT_NO_THROW(sota_client->initialize()); - UptaneKey_Check_Test::checkKeyTests(storage, sota_client); + auto sota_client = std_::make_unique(config, storage, http); + sota_client->addSecondary(std::make_shared(ecu_config1)); + sota_client->addSecondary(std::make_shared(ecu_config2)); + UptaneKey_Check_Test::checkKeyTests(storage, *sota_client); // Remove TLS keys but keep ECU keys and try to initialize again. 
storage->clearTlsCreds(); @@ -151,10 +148,10 @@ TEST(UptaneKey, RecoverWithoutKeys) { { auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - - EXPECT_NO_THROW(sota_client->initialize()); - UptaneKey_Check_Test::checkKeyTests(storage, sota_client); + auto sota_client = std_::make_unique(config, storage, http); + sota_client->addSecondary(std::make_shared(ecu_config1)); + sota_client->addSecondary(std::make_shared(ecu_config2)); + UptaneKey_Check_Test::checkKeyTests(storage, *sota_client); // Remove ECU keys but keep TLS keys and try to initialize again. storage->clearPrimaryKeys(); @@ -167,10 +164,10 @@ TEST(UptaneKey, RecoverWithoutKeys) { { auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - - EXPECT_NO_THROW(sota_client->initialize()); - UptaneKey_Check_Test::checkKeyTests(storage, sota_client); + auto sota_client = std_::make_unique(config, storage, http); + sota_client->addSecondary(std::make_shared(ecu_config1)); + sota_client->addSecondary(std::make_shared(ecu_config2)); + UptaneKey_Check_Test::checkKeyTests(storage, *sota_client); } } diff --git a/src/libaktualizr/socket_activation/CMakeLists.txt b/src/libaktualizr/socket_activation/CMakeLists.txt deleted file mode 100644 index 74d5ebe8d7..0000000000 --- a/src/libaktualizr/socket_activation/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -set(HEADERS socket_activation.h) -if(BUILD_SYSTEMD) - set(SOURCES socket_activation_systemd.cc) -else() - set(SOURCES socket_activation_dummy.cc) -endif() - -aktualizr_source_file_checks(socket_activation_systemd.cc socket_activation_dummy.cc) - -add_library(socket_activation OBJECT ${SOURCES}) -target_include_directories(socket_activation PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) - -if(BUILD_SYSTEMD) - target_include_directories(socket_activation PUBLIC ${SYSTEMD_INCLUDE_DIR}) - endif() - 
-aktualizr_source_file_checks(${HEADERS}) diff --git a/src/libaktualizr/socket_activation/socket_activation.h b/src/libaktualizr/socket_activation/socket_activation.h deleted file mode 100644 index 2c25a0f2ea..0000000000 --- a/src/libaktualizr/socket_activation/socket_activation.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef SOCKET_ACTIVATION_H_ -#define SOCKET_ACTIVATION_H_ - -namespace socket_activation { - -// maps the interface of - -extern int listen_fds_start; - -int listen_fds(int unset_environment); -int listen_fds_with_names(int unset_environment, char*** names); -} // namespace socket_activation - -#endif // SOCKET_ACTIVATION_H_ diff --git a/src/libaktualizr/socket_activation/socket_activation_dummy.cc b/src/libaktualizr/socket_activation/socket_activation_dummy.cc deleted file mode 100644 index 7c85842ae2..0000000000 --- a/src/libaktualizr/socket_activation/socket_activation_dummy.cc +++ /dev/null @@ -1,19 +0,0 @@ -#include "socket_activation/socket_activation.h" - -namespace socket_activation { - -int listen_fds_start = 3; - -int listen_fds(int unset_environment) { - (void)unset_environment; - - return 0; -} - -int listen_fds_with_names(int unset_environment, char*** names) { - (void)unset_environment; - (void)names; - - return 0; -} -}; // namespace socket_activation diff --git a/src/libaktualizr/socket_activation/socket_activation_systemd.cc b/src/libaktualizr/socket_activation/socket_activation_systemd.cc deleted file mode 100644 index 8e71d85c83..0000000000 --- a/src/libaktualizr/socket_activation/socket_activation_systemd.cc +++ /dev/null @@ -1,14 +0,0 @@ -#include - -#include "socket_activation/socket_activation.h" - -namespace socket_activation { - -int listen_fds_start = SD_LISTEN_FDS_START; - -int listen_fds(int unset_environment) { return sd_listen_fds(unset_environment); } - -int listen_fds_with_names(int unset_environment, char*** names) { - return sd_listen_fds_with_names(unset_environment, names); -} -} // namespace socket_activation diff --git 
a/src/libaktualizr/storage/CMakeLists.txt b/src/libaktualizr/storage/CMakeLists.txt index c02bd37d43..eca397ef98 100644 --- a/src/libaktualizr/storage/CMakeLists.txt +++ b/src/libaktualizr/storage/CMakeLists.txt @@ -6,33 +6,32 @@ add_custom_command(OUTPUT sql_schemas.cc sql_schemas_target WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) -if(STORAGE_TYPE STREQUAL "sqlite") - set(SOURCES sqlstorage.cc sqlstorage_base.cc) - set(HEADERS sqlstorage.h sql_utils.h sqlstorage_base.h storage_exception.h) -elseif(STORAGE_TYPE STREQUAL "android") - set(SOURCES androidstorage.cc) - set(HEADERS androidstorage.h) -else() - message(FATAL_ERROR "Unknown storage type: ${storage_type}") -endif() - -set(HEADERS ${HEADERS} storage_config.h fsstorage_read.h invstorage.h) -set(SOURCES ${SOURCES} fsstorage_read.cc invstorage.cc) +set(HEADERS fsstorage_read.h + invstorage.h + sql_utils.h + sqlstorage.h + sqlstorage_base.h + storage_exception.h) + +set(SOURCES fsstorage_read.cc + invstorage.cc + sqlstorage.cc + sqlstorage_base.cc) target_sources(config PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/storage_config.cc) -if(STORAGE_TYPE STREQUAL "sqlite") - add_aktualizr_test(NAME storage_atomic SOURCES storage_atomic_test.cc PROJECT_WORKING_DIRECTORY) - add_aktualizr_test(NAME sql_utils SOURCES sql_utils_test.cc PROJECT_WORKING_DIRECTORY) - add_aktualizr_test(NAME sqlstorage SOURCES sqlstorage_test.cc sql_schemas.cc - ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test) - list(REMOVE_ITEM TEST_SOURCES sql_schemas.cc) - add_aktualizr_test(NAME storage SOURCES storage_common_test.cc PROJECT_WORKING_DIRECTORY) - - add_test(NAME test_schema_migration - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/schema_migration_test.sh ${PROJECT_SOURCE_DIR}/config/sql) - set_tests_properties(test_schema_migration PROPERTIES LABELS "noptest") -endif(STORAGE_TYPE STREQUAL "sqlite") +add_aktualizr_test(NAME storage_atomic SOURCES storage_atomic_test.cc PROJECT_WORKING_DIRECTORY) +add_aktualizr_test(NAME sql_utils SOURCES sql_utils_test.cc 
PROJECT_WORKING_DIRECTORY) +add_aktualizr_test(NAME sqlstorage SOURCES sqlstorage_test.cc ARGS ${CMAKE_CURRENT_SOURCE_DIR}/test) +add_aktualizr_test(NAME storage_common + SOURCES storage_common_test.cc + LIBRARIES uptane_generator_lib + PROJECT_WORKING_DIRECTORY) + +add_test(NAME test_schema_migration + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/schema_migration_test.sh ${PROJECT_SOURCE_DIR}/config/sql) + +set_tests_properties(test_schema_migration PROPERTIES LABELS "noptest") add_library(storage OBJECT ${SOURCES} sql_schemas.cc) diff --git a/src/libaktualizr/storage/fsstorage_read.cc b/src/libaktualizr/storage/fsstorage_read.cc index 46889f58f4..2720fe325e 100644 --- a/src/libaktualizr/storage/fsstorage_read.cc +++ b/src/libaktualizr/storage/fsstorage_read.cc @@ -4,9 +4,11 @@ #include #include +#include #include #include +#include #include #include #include "json/json.h" @@ -15,12 +17,12 @@ #include "utilities/utils.h" FSStorageRead::FSStorageRead(const StorageConfig& config) : config_(config) { - boost::filesystem::path images_path = config_.uptane_metadata_path.get(config_.path) / "repo"; + boost::filesystem::path image_path = config_.uptane_metadata_path.get(config_.path) / "repo"; boost::filesystem::path director_path = config_.uptane_metadata_path.get(config_.path) / "director"; - // migrate from old unversioned Uptane root meta + // migrate from old unversioned Uptane Root metadata { for (auto repo : {Uptane::RepositoryType::Director(), Uptane::RepositoryType::Image()}) { - boost::filesystem::path& meta_dir = repo == (Uptane::RepositoryType::Director()) ? director_path : images_path; + boost::filesystem::path& meta_dir = repo == (Uptane::RepositoryType::Director()) ? 
director_path : image_path; boost::filesystem::path meta_path = meta_dir / Uptane::Version().RoleFileName(Uptane::Role::Root()); if (boost::filesystem::exists(meta_path)) { std::string data = Utils::readFile(meta_path); @@ -34,14 +36,14 @@ FSStorageRead::FSStorageRead(const StorageConfig& config) : config_(config) { } latest_director_root = findMaxVersion(director_path, Uptane::Role::Root()); - latest_images_root = findMaxVersion(images_path, Uptane::Role::Root()); + latest_image_root = findMaxVersion(image_path, Uptane::Role::Root()); } -bool FSStorageRead::loadPrimaryKeys(std::string* public_key, std::string* private_key) { +bool FSStorageRead::loadPrimaryKeys(std::string* public_key, std::string* private_key) const { return loadPrimaryPublic(public_key) && loadPrimaryPrivate(private_key); } -bool FSStorageRead::loadPrimaryPublic(std::string* public_key) { +bool FSStorageRead::loadPrimaryPublic(std::string* public_key) const { boost::filesystem::path public_key_path = config_.uptane_public_key_path.get(config_.path); if (!boost::filesystem::exists(public_key_path)) { return false; @@ -53,7 +55,7 @@ bool FSStorageRead::loadPrimaryPublic(std::string* public_key) { return true; } -bool FSStorageRead::loadPrimaryPrivate(std::string* private_key) { +bool FSStorageRead::loadPrimaryPrivate(std::string* private_key) const { boost::filesystem::path private_key_path = config_.uptane_private_key_path.get(config_.path); if (!boost::filesystem::exists(private_key_path)) { return false; @@ -65,7 +67,7 @@ bool FSStorageRead::loadPrimaryPrivate(std::string* private_key) { return true; } -bool FSStorageRead::loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) { +bool FSStorageRead::loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) const { boost::filesystem::path ca_path(config_.tls_cacert_path.get(config_.path)); boost::filesystem::path cert_path(config_.tls_clientcert_path.get(config_.path)); boost::filesystem::path 
pkey_path(config_.tls_pkey_path.get(config_.path)); @@ -86,7 +88,7 @@ bool FSStorageRead::loadTlsCreds(std::string* ca, std::string* cert, std::string return true; } -bool FSStorageRead::loadTlsCommon(std::string* data, const BasedPath& path_in) { +bool FSStorageRead::loadTlsCommon(std::string* data, const utils::BasedPath& path_in) const { boost::filesystem::path path(path_in.get(config_.path)); if (!boost::filesystem::exists(path)) { return false; @@ -99,13 +101,13 @@ bool FSStorageRead::loadTlsCommon(std::string* data, const BasedPath& path_in) { return true; } -bool FSStorageRead::loadTlsCa(std::string* ca) { return loadTlsCommon(ca, config_.tls_cacert_path); } +bool FSStorageRead::loadTlsCa(std::string* ca) const { return loadTlsCommon(ca, config_.tls_cacert_path); } -bool FSStorageRead::loadTlsCert(std::string* cert) { return loadTlsCommon(cert, config_.tls_clientcert_path); } +bool FSStorageRead::loadTlsCert(std::string* cert) const { return loadTlsCommon(cert, config_.tls_clientcert_path); } -bool FSStorageRead::loadTlsPkey(std::string* pkey) { return loadTlsCommon(pkey, config_.tls_pkey_path); } +bool FSStorageRead::loadTlsPkey(std::string* pkey) const { return loadTlsCommon(pkey, config_.tls_pkey_path); } -bool FSStorageRead::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) { +bool FSStorageRead::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) const { boost::filesystem::path metafile; switch (repo) { case (Uptane::RepositoryType::Director()): @@ -141,7 +143,7 @@ bool FSStorageRead::loadRoot(std::string* data, Uptane::RepositoryType repo, Upt return true; } -bool FSStorageRead::loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role& role) { +bool FSStorageRead::loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role& role) const { boost::filesystem::path metafile; switch (repo) { case (Uptane::RepositoryType::Director()): @@ -166,7 +168,7 @@ 
bool FSStorageRead::loadNonRoot(std::string* data, Uptane::RepositoryType repo, return true; } -bool FSStorageRead::loadDeviceId(std::string* device_id) { +bool FSStorageRead::loadDeviceId(std::string* device_id) const { if (!boost::filesystem::exists(Utils::absolutePath(config_.path, "device_id").string())) { return false; } @@ -177,11 +179,11 @@ bool FSStorageRead::loadDeviceId(std::string* device_id) { return true; } -bool FSStorageRead::loadEcuRegistered() { +bool FSStorageRead::loadEcuRegistered() const { return boost::filesystem::exists(Utils::absolutePath(config_.path, "is_registered").string()); } -bool FSStorageRead::loadEcuSerials(EcuSerials* serials) { +bool FSStorageRead::loadEcuSerials(EcuSerials* serials) const { std::string buf; std::string serial; std::string hw_id; @@ -229,21 +231,27 @@ bool FSStorageRead::loadEcuSerials(EcuSerials* serials) { return true; } -bool FSStorageRead::loadMisconfiguredEcus(std::vector* ecus) { +bool FSStorageRead::loadMisconfiguredEcus(std::vector* ecus) const { if (!boost::filesystem::exists(Utils::absolutePath(config_.path, "misconfigured_ecus"))) { return false; } - Json::Value content_json = Utils::parseJSONFile(Utils::absolutePath(config_.path, "misconfigured_ecus").string()); - for (Json::ValueIterator it = content_json.begin(); it != content_json.end(); ++it) { - ecus->push_back(MisconfiguredEcu(Uptane::EcuSerial((*it)["serial"].asString()), - Uptane::HardwareIdentifier((*it)["hardware_id"].asString()), - static_cast((*it)["state"].asInt()))); + try { + Json::Value content_json = Utils::parseJSONFile(Utils::absolutePath(config_.path, "misconfigured_ecus").string()); + for (auto it = content_json.begin(); it != content_json.end(); ++it) { + ecus->push_back(MisconfiguredEcu(Uptane::EcuSerial((*it)["serial"].asString()), + Uptane::HardwareIdentifier((*it)["hardware_id"].asString()), + static_cast((*it)["state"].asInt()))); + } + } catch (const std::exception& ex) { + LOG_ERROR << "Unable to parse misconfigured_ecus: 
" << ex.what(); + return false; } return true; } -bool FSStorageRead::loadInstalledVersions(std::vector* installed_versions, size_t* current_version) { +bool FSStorageRead::loadInstalledVersions(std::vector* installed_versions, + size_t* current_version) const { const boost::filesystem::path path = Utils::absolutePath(config_.path, "installed_versions"); return INvStorage::fsReadInstalledVersions(path, installed_versions, current_version); } diff --git a/src/libaktualizr/storage/fsstorage_read.h b/src/libaktualizr/storage/fsstorage_read.h index 4898478304..652e2243b7 100644 --- a/src/libaktualizr/storage/fsstorage_read.h +++ b/src/libaktualizr/storage/fsstorage_read.h @@ -1,33 +1,37 @@ #ifndef FSSTORAGE_READ_H_ #define FSSTORAGE_READ_H_ -#include +#include #include "invstorage.h" class FSStorageRead { public: explicit FSStorageRead(const StorageConfig& config); ~FSStorageRead() = default; - bool loadPrimaryKeys(std::string* public_key, std::string* private_key); - bool loadPrimaryPublic(std::string* public_key); - bool loadPrimaryPrivate(std::string* private_key); - - bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey); - bool loadTlsCa(std::string* ca); - bool loadTlsCert(std::string* cert); - bool loadTlsPkey(std::string* pkey); - - bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version); - bool loadLatestRoot(std::string* data, Uptane::RepositoryType repo) { + FSStorageRead(const FSStorageRead&) = delete; + FSStorageRead(FSStorageRead&&) = delete; + FSStorageRead& operator=(const FSStorageRead&) = delete; + FSStorageRead& operator=(FSStorageRead&&) = delete; + bool loadPrimaryKeys(std::string* public_key, std::string* private_key) const; + bool loadPrimaryPublic(std::string* public_key) const; + bool loadPrimaryPrivate(std::string* private_key) const; + + bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) const; + bool loadTlsCa(std::string* ca) const; + bool loadTlsCert(std::string* cert) 
const; + bool loadTlsPkey(std::string* pkey) const; + + bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) const; + bool loadLatestRoot(std::string* data, Uptane::RepositoryType repo) const { return loadRoot(data, repo, Uptane::Version()); }; - bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role& role); + bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role& role) const; - bool loadDeviceId(std::string* device_id); - bool loadEcuSerials(EcuSerials* serials); - bool loadMisconfiguredEcus(std::vector* ecus); - bool loadEcuRegistered(); - bool loadInstalledVersions(std::vector* installed_versions, size_t* current_version); + bool loadDeviceId(std::string* device_id) const; + bool loadEcuSerials(EcuSerials* serials) const; + bool loadMisconfiguredEcus(std::vector* ecus) const; + bool loadEcuRegistered() const; + bool loadInstalledVersions(std::vector* installed_versions, size_t* current_version) const; void cleanUpAll(); @@ -37,12 +41,12 @@ class FSStorageRead { const StorageConfig& config_; Uptane::Version latest_director_root; - Uptane::Version latest_images_root; + Uptane::Version latest_image_root; - bool loadTlsCommon(std::string* data, const BasedPath& path_in); + bool loadTlsCommon(std::string* data, const utils::BasedPath& path_in) const; - bool splitNameRoleVersion(const std::string& full_name, std::string* role_name, int* version); - Uptane::Version findMaxVersion(const boost::filesystem::path& meta_directory, const Uptane::Role& role); + static bool splitNameRoleVersion(const std::string& full_name, std::string* role_name, int* version); + static Uptane::Version findMaxVersion(const boost::filesystem::path& meta_directory, const Uptane::Role& role); void clearPrimaryKeys(); void clearTlsCreds(); diff --git a/src/libaktualizr/storage/invstorage.cc b/src/libaktualizr/storage/invstorage.cc index a4c592852d..fb07705257 100644 --- 
a/src/libaktualizr/storage/invstorage.cc +++ b/src/libaktualizr/storage/invstorage.cc @@ -1,31 +1,50 @@ #include "invstorage.h" #include +#include +#include "crypto/crypto.h" #include "fsstorage_read.h" #include "logging/logging.h" #include "sqlstorage.h" +#include "uptane/exceptions.h" #include "utilities/utils.h" -void INvStorage::importSimple(const boost::filesystem::path& base_path, store_data_t store_func, load_data_t load_func, - const BasedPath& imported_data_path) { - if (!(this->*load_func)(nullptr) && !imported_data_path.empty()) { +void INvStorage::importUpdateSimple(const boost::filesystem::path& base_path, store_data_t store_func, + load_data_t load_func, const utils::BasedPath& imported_data_path, + const std::string& data_name) { + std::string prev_content; + std::string content; + bool update = false; + if (!(this->*load_func)(&prev_content)) { + update = true; + } else if (!imported_data_path.empty()) { + content = Utils::readFile(imported_data_path.get(base_path).string()); + if (Crypto::sha256digest(content) != Crypto::sha256digest(prev_content)) { + update = true; + } + } + + if (update && !imported_data_path.empty()) { boost::filesystem::path abs_path = imported_data_path.get(base_path); if (!boost::filesystem::exists(abs_path)) { - LOG_ERROR << "Couldn't import data: " << abs_path << " doesn't exist."; + LOG_ERROR << "Couldn't import " << data_name << ": " << abs_path << " doesn't exist."; return; } - std::string content = Utils::readFile(abs_path.string()); + if (content.empty()) { + content = Utils::readFile(abs_path.string()); + } (this->*store_func)(content); + LOG_DEBUG << "Successfully imported " << data_name << " from " << abs_path; } } -void INvStorage::importUpdateSimple(const boost::filesystem::path& base_path, store_data_t store_func, - load_data_t load_func, const BasedPath& imported_data_path) { +void INvStorage::importUpdateCertificate(const boost::filesystem::path& base_path, + const utils::BasedPath& imported_data_path) { 
std::string prev_content; std::string content; bool update = false; - if (!(this->*load_func)(&prev_content)) { + if (!loadTlsCert(&prev_content)) { update = true; } else if (!imported_data_path.empty()) { content = Utils::readFile(imported_data_path.get(base_path).string()); @@ -37,19 +56,40 @@ void INvStorage::importUpdateSimple(const boost::filesystem::path& base_path, st if (update && !imported_data_path.empty()) { boost::filesystem::path abs_path = imported_data_path.get(base_path); if (!boost::filesystem::exists(abs_path)) { - LOG_ERROR << "Couldn't import data: " << abs_path << " doesn't exist."; + LOG_ERROR << "Couldn't import client certificate: " << abs_path << " doesn't exist."; return; } if (content.empty()) { content = Utils::readFile(abs_path.string()); } - (this->*store_func)(content); + + // Make sure the device ID of the new cert hasn't changed. + const std::string new_device_id = Crypto::extractSubjectCN(content); + std::string old_device_id; + if (!loadDeviceId(&old_device_id)) { + LOG_DEBUG << "Unable to load previous device ID."; + } else if (new_device_id != old_device_id) { + LOG_WARNING << "Certificate at " << abs_path.string() << " has a CN that may be used as device ID of " + << new_device_id << " but the device currently is identified as " << old_device_id; + } + + storeTlsCert(content); + LOG_DEBUG << "Successfully imported client certificate from " << abs_path; } } -void INvStorage::importPrimaryKeys(const boost::filesystem::path& base_path, const BasedPath& import_pubkey_path, - const BasedPath& import_privkey_path) { - if (loadPrimaryKeys(nullptr, nullptr) || import_pubkey_path.empty() || import_privkey_path.empty()) { +void INvStorage::importPrimaryKeys(const boost::filesystem::path& base_path, const utils::BasedPath& import_pubkey_path, + const utils::BasedPath& import_privkey_path) { + if (client_ == StorageClient::kTUF) { + LOG_DEBUG << "TUF instance, primary keys not required"; + return; + } + if (import_pubkey_path.empty() || 
import_privkey_path.empty()) { + LOG_ERROR << "Couldn`t import data: empty path received"; + return; + } + if (loadPrimaryKeys(nullptr, nullptr)) { + LOG_INFO << "Couldn`t import data: primary keys already in storage"; return; } const boost::filesystem::path pubkey_abs_path = import_pubkey_path.get(base_path); @@ -65,11 +105,12 @@ void INvStorage::importPrimaryKeys(const boost::filesystem::path& base_path, con const std::string pub_content = Utils::readFile(pubkey_abs_path.string()); const std::string priv_content = Utils::readFile(privkey_abs_path.string()); storePrimaryKeys(pub_content, priv_content); + LOG_DEBUG << "Successfully imported Uptane keys from " << pubkey_abs_path << " and " << privkey_abs_path; } void INvStorage::importInstalledVersions(const boost::filesystem::path& base_path) { std::vector installed_versions; - const boost::filesystem::path file_path = BasedPath("installed_versions").get(base_path); + const boost::filesystem::path file_path = utils::BasedPath("installed_versions").get(base_path); loadPrimaryInstallationLog(&installed_versions, false); if (!installed_versions.empty()) { return; @@ -80,24 +121,51 @@ void INvStorage::importInstalledVersions(const boost::filesystem::path& base_pat // installed versions in legacy fs storage are all for primary savePrimaryInstalledVersion(installed_versions[current_index], InstalledVersionUpdateMode::kCurrent); boost::filesystem::remove(file_path); + LOG_DEBUG << "Successfully imported installed versions from " << file_path; + } +} + +void INvStorage::importInitialRoot(const boost::filesystem::path& base_path) { + importInitialRootFile(base_path / "repo/root.json", Uptane::RepositoryType::Image()); + importInitialRootFile(base_path / "director/root.json", Uptane::RepositoryType::Director()); +} + +void INvStorage::importInitialRootFile(const boost::filesystem::path& root_path, Uptane::RepositoryType repo_type) { + std::string root_tmp; // Only needed for loadLatestRoot + if (!loadLatestRoot(&root_tmp, 
repo_type)) { + if (boost::filesystem::is_regular_file(root_path)) { + try { + std::string root_str = Utils::readFile(root_path); + Uptane::Root orig_root(Uptane::Root::Policy::kAcceptAll); + Uptane::Root new_root(repo_type, Utils::parseJSON(root_str), orig_root); + // No exception. Save it + storeRoot(root_str, repo_type, Uptane::Version(new_root.version())); + LOG_INFO << "Imported initial " << repo_type << " root keys from " << root_path; + } catch (Uptane::Exception& e) { + LOG_WARNING << "Couldn't import initial " << repo_type << " root keys from " << root_path << " " << e.what(); + } + } else { + LOG_DEBUG << "Not importing " << root_path << " because it doesn't exist"; + } + } else { + LOG_TRACE << "Root for " << repo_type << " already present, not importing"; } } void INvStorage::importData(const ImportConfig& import_config) { importPrimaryKeys(import_config.base_path, import_config.uptane_public_key_path, import_config.uptane_private_key_path); - // root CA certificate can be updated + importUpdateCertificate(import_config.base_path, import_config.tls_clientcert_path); importUpdateSimple(import_config.base_path, &INvStorage::storeTlsCa, &INvStorage::loadTlsCa, - import_config.tls_cacert_path); - importSimple(import_config.base_path, &INvStorage::storeTlsCert, &INvStorage::loadTlsCert, - import_config.tls_clientcert_path); - importSimple(import_config.base_path, &INvStorage::storeTlsPkey, &INvStorage::loadTlsPkey, - import_config.tls_pkey_path); - + import_config.tls_cacert_path, "server CA certificate"); + importUpdateSimple(import_config.base_path, &INvStorage::storeTlsPkey, &INvStorage::loadTlsPkey, + import_config.tls_pkey_path, "client TLS key"); importInstalledVersions(import_config.base_path); + importInitialRoot(import_config.base_path); } -std::shared_ptr INvStorage::newStorage(const StorageConfig& config, const bool readonly) { +std::shared_ptr INvStorage::newStorage(const StorageConfig& config, const bool readonly, + StorageClient client) { 
switch (config.type) { case StorageType::kSqlite: { boost::filesystem::path db_path = config.sqldb_path.get(config.path); @@ -116,7 +184,7 @@ std::shared_ptr INvStorage::newStorage(const StorageConfig& config, old_config.type = StorageType::kFileSystem; old_config.path = config.path; - auto sql_storage = std::make_shared(config, readonly); + auto sql_storage = std::make_shared(config, readonly, client); FSStorageRead fs_storage(old_config); INvStorage::FSSToSQLS(fs_storage, *sql_storage); return sql_storage; @@ -126,7 +194,7 @@ std::shared_ptr INvStorage::newStorage(const StorageConfig& config, } else { LOG_INFO << "Use existing SQL storage: " << db_path; } - return std::make_shared(config, readonly); + return std::make_shared(config, readonly, client); } case StorageType::kFileSystem: default: @@ -172,7 +240,9 @@ void INvStorage::FSSToSQLS(FSStorageRead& fs_storage, SQLStorage& sql_storage) { std::vector ecus; if (fs_storage.loadMisconfiguredEcus(&ecus)) { - sql_storage.storeMisconfiguredEcus(ecus); + for (auto& ecu : ecus) { + sql_storage.saveMisconfiguredEcu(ecu); + } } std::vector installed_versions; @@ -197,10 +267,10 @@ void INvStorage::FSSToSQLS(FSStorageRead& fs_storage, SQLStorage& sql_storage) { } } } - // additionally migrate the whole root metadata chain + // additionally migrate the whole Root metadata chain std::string latest_root; for (auto repo : {Uptane::RepositoryType::Director(), Uptane::RepositoryType::Image()}) { - if (fs_storage.loadLatestRoot(&latest_root, Uptane::RepositoryType::Director())) { + if (fs_storage.loadLatestRoot(&latest_root, repo)) { int latest_version = Uptane::extractVersionUntrusted(latest_root); for (int version = 0; version <= latest_version; ++version) { std::string root; @@ -217,32 +287,36 @@ void INvStorage::FSSToSQLS(FSStorageRead& fs_storage, SQLStorage& sql_storage) { bool INvStorage::fsReadInstalledVersions(const boost::filesystem::path& filename, std::vector* installed_versions, size_t* current_version) { - 
std::string current_hash; if (access(filename.c_str(), R_OK) != 0) { return false; } - const Json::Value installed_versions_json = Utils::parseJSONFile(filename.string()); - std::vector new_versions; - size_t k = 0; - for (Json::ValueIterator it = installed_versions_json.begin(); it != installed_versions_json.end(); ++it, ++k) { - if (!(*it).isObject()) { - // We loaded old format, migrate to new one. - Json::Value t_json; - t_json["hashes"]["sha256"] = it.key(); - Uptane::Target t((*it).asString(), t_json); - new_versions.push_back(t); - if (current_version != nullptr) { - *current_version = k; - } - } else { - if (current_version != nullptr && (*it)["is_current"].asBool()) { - *current_version = k; + try { + const Json::Value installed_versions_json = Utils::parseJSONFile(filename.string()); + std::vector new_versions; + size_t k = 0; + for (auto it = installed_versions_json.begin(); it != installed_versions_json.end(); ++it, ++k) { + if (!(*it).isObject()) { + // We loaded old format, migrate to new one. 
+ Json::Value t_json; + t_json["hashes"]["sha256"] = it.key(); + Uptane::Target t((*it).asString(), t_json); + new_versions.push_back(t); + if (current_version != nullptr) { + *current_version = k; + } + } else { + if (current_version != nullptr && (*it)["is_current"].asBool()) { + *current_version = k; + } + Uptane::Target t(it.key().asString(), *it); + new_versions.push_back(t); } - Uptane::Target t(it.key().asString(), *it); - new_versions.push_back(t); } + *installed_versions = new_versions; + } catch (const std::exception& ex) { + LOG_ERROR << "Unable to parse installed_versions: " << ex.what(); + return false; } - *installed_versions = new_versions; return true; } diff --git a/src/libaktualizr/storage/invstorage.h b/src/libaktualizr/storage/invstorage.h index 9eee761fb3..24b249c1fd 100644 --- a/src/libaktualizr/storage/invstorage.h +++ b/src/libaktualizr/storage/invstorage.h @@ -4,26 +4,29 @@ #include #include #include +#include -#include +#include #include -#include "storage_config.h" +#include "libaktualizr/config.h" #include "storage_exception.h" - #include "uptane/tuf.h" -#include "utilities/types.h" class INvStorage; class FSStorageRead; class SQLStorage; +enum class StorageClient { kUptane = 0, kTUF = 1 }; + using store_data_t = void (INvStorage::*)(const std::string&); -using load_data_t = bool (INvStorage::*)(std::string*); +using load_data_t = bool (INvStorage::*)(std::string*) const; -typedef std::vector> EcuSerials; +using EcuSerials = std::vector>; -enum class EcuState { kOld = 0, kNotRegistered }; +// kUnused was previously kNotRegistered, but re-registration is now possible so +// that is no longer a misconfiguration. 
+enum class EcuState { kOld = 0, kUnused }; struct MisconfiguredEcu { MisconfiguredEcu(Uptane::EcuSerial serial_in, Uptane::HardwareIdentifier hardware_id_in, EcuState state_in) @@ -33,168 +36,123 @@ struct MisconfiguredEcu { EcuState state; }; -class StorageTargetWHandle { - public: - class WriteError : public std::runtime_error { - public: - explicit WriteError(const std::string& what) : std::runtime_error(what) {} - }; - virtual ~StorageTargetWHandle() = default; - virtual size_t wfeed(const uint8_t* buf, size_t size) = 0; - virtual void wcommit() = 0; - virtual void wabort() = 0; - size_t getWrittenSize() { return written_size_; } - - friend std::istream& operator>>(std::istream& is, StorageTargetWHandle& handle) { - std::array arr{}; - while (!is.eof()) { - is.read(reinterpret_cast(arr.data()), arr.size()); - handle.wfeed(arr.data(), static_cast(is.gcount())); - } - return is; - } - - protected: - size_t written_size_{0}; -}; - -class StorageTargetRHandle { - public: - class ReadError : public std::runtime_error { - public: - explicit ReadError(const std::string& what) : std::runtime_error(what) {} - }; - virtual ~StorageTargetRHandle() = default; - virtual bool isPartial() const = 0; - virtual std::unique_ptr toWriteHandle() = 0; - - virtual size_t rsize() const = 0; - virtual size_t rread(uint8_t* buf, size_t size) = 0; - virtual void rclose() = 0; - - void writeToFile(const boost::filesystem::path& path) { - std::array arr{}; - size_t written = 0; - std::ofstream file(path.c_str()); - if (!file.good()) { - throw std::runtime_error(std::string("Error opening file ") + path.string()); - } - while (written < rsize()) { - size_t nread = rread(arr.data(), arr.size()); - file.write(reinterpret_cast(arr.data()), static_cast(nread)); - written += nread; - } - file.close(); - } - - // FIXME this function loads the whole image to the memory - friend std::ostream& operator<<(std::ostream& os, StorageTargetRHandle& handle) { - std::array arr{}; - size_t written = 0; - 
while (written < handle.rsize()) { - size_t nread = handle.rread(arr.data(), arr.size()); - - os.write(reinterpret_cast(arr.data()), static_cast(nread)); - written += nread; - } - - return os; - } -}; - enum class InstalledVersionUpdateMode { kNone, kCurrent, kPending }; -// Functions loading/storing multiple pieces of data are supposed to do so atomically as far as implementation makes it -// possible +// Functions loading/storing multiple pieces of data are supposed to do so +// atomically as far as implementation makes it possible. +// +// store* functions normally write the complete content. save* functions just add an entry. class INvStorage { public: - explicit INvStorage(StorageConfig config) : config_(std::move(config)) {} + explicit INvStorage(StorageConfig config, StorageClient client = StorageClient::kUptane) + : config_(std::move(config)), client_(client) {} virtual ~INvStorage() = default; + INvStorage(const INvStorage&) = delete; + INvStorage(INvStorage&&) = delete; + INvStorage& operator=(const INvStorage&) = delete; + INvStorage& operator=(INvStorage&&) = delete; virtual StorageType type() = 0; virtual void storePrimaryKeys(const std::string& public_key, const std::string& private_key) = 0; - virtual bool loadPrimaryKeys(std::string* public_key, std::string* private_key) = 0; - virtual bool loadPrimaryPublic(std::string* public_key) = 0; - virtual bool loadPrimaryPrivate(std::string* private_key) = 0; + virtual bool loadPrimaryKeys(std::string* public_key, std::string* private_key) const = 0; + virtual bool loadPrimaryPublic(std::string* public_key) const = 0; + virtual bool loadPrimaryPrivate(std::string* private_key) const = 0; virtual void clearPrimaryKeys() = 0; + virtual void saveSecondaryInfo(const Uptane::EcuSerial& ecu_serial, const std::string& sec_type, + const PublicKey& public_key) = 0; + virtual void saveSecondaryData(const Uptane::EcuSerial& ecu_serial, const std::string& data) = 0; + virtual bool loadSecondaryInfo(const 
Uptane::EcuSerial& ecu_serial, SecondaryInfo* secondary) const = 0; + virtual bool loadSecondariesInfo(std::vector* secondaries) const = 0; + virtual void storeTlsCreds(const std::string& ca, const std::string& cert, const std::string& pkey) = 0; virtual void storeTlsCa(const std::string& ca) = 0; virtual void storeTlsCert(const std::string& cert) = 0; virtual void storeTlsPkey(const std::string& pkey) = 0; - virtual bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) = 0; - virtual bool loadTlsCa(std::string* ca) = 0; - virtual bool loadTlsCert(std::string* cert) = 0; - virtual bool loadTlsPkey(std::string* cert) = 0; + virtual bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) const = 0; + virtual bool loadTlsCa(std::string* ca) const = 0; + virtual bool loadTlsCert(std::string* cert) const = 0; + virtual bool loadTlsPkey(std::string* cert) const = 0; virtual void clearTlsCreds() = 0; virtual void storeRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Version version) = 0; - virtual bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) = 0; - bool loadLatestRoot(std::string* data, Uptane::RepositoryType repo) { + virtual bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) const = 0; + bool loadLatestRoot(std::string* data, Uptane::RepositoryType repo) const { return loadRoot(data, repo, Uptane::Version()); }; virtual void storeNonRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Role role) = 0; - virtual bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Role role) = 0; + virtual bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Role role) const = 0; virtual void clearNonRootMeta(Uptane::RepositoryType repo) = 0; virtual void clearMetadata() = 0; virtual void storeDelegation(const std::string& data, Uptane::Role role) = 0; - virtual bool loadDelegation(std::string* data, 
Uptane::Role role) = 0; + virtual bool loadDelegation(std::string* data, Uptane::Role role) const = 0; virtual bool loadAllDelegations(std::vector>& data) const = 0; virtual void deleteDelegation(Uptane::Role role) = 0; virtual void clearDelegations() = 0; virtual void storeDeviceId(const std::string& device_id) = 0; - virtual bool loadDeviceId(std::string* device_id) = 0; + virtual bool loadDeviceId(std::string* device_id) const = 0; virtual void clearDeviceId() = 0; virtual void storeEcuSerials(const EcuSerials& serials) = 0; - virtual bool loadEcuSerials(EcuSerials* serials) = 0; + virtual bool loadEcuSerials(EcuSerials* serials) const = 0; virtual void clearEcuSerials() = 0; - virtual void storeMisconfiguredEcus(const std::vector& ecus) = 0; - virtual bool loadMisconfiguredEcus(std::vector* ecus) = 0; + virtual void storeCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, const std::string& manifest) = 0; + virtual bool loadCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, std::string* manifest) const = 0; + + virtual void saveMisconfiguredEcu(const MisconfiguredEcu& ecu) = 0; + virtual bool loadMisconfiguredEcus(std::vector* ecus) const = 0; virtual void clearMisconfiguredEcus() = 0; virtual void storeEcuRegistered() = 0; // should be called after storeDeviceId - virtual bool loadEcuRegistered() = 0; + virtual bool loadEcuRegistered() const = 0; virtual void clearEcuRegistered() = 0; virtual void storeNeedReboot() = 0; - virtual bool loadNeedReboot(bool* need_reboot) = 0; + virtual bool loadNeedReboot(bool* need_reboot) const = 0; virtual void clearNeedReboot() = 0; virtual void saveInstalledVersion(const std::string& ecu_serial, const Uptane::Target& target, InstalledVersionUpdateMode update_mode) = 0; virtual bool loadInstalledVersions(const std::string& ecu_serial, boost::optional* current_version, - boost::optional* pending_version) = 0; + boost::optional* pending_version) const = 0; virtual bool loadInstallationLog(const std::string& ecu_serial, 
std::vector* log, - bool only_installed) = 0; + bool only_installed) const = 0; virtual bool hasPendingInstall() = 0; + virtual void getPendingEcus(std::vector>* pendingEcus) = 0; virtual void clearInstalledVersions() = 0; virtual void saveEcuInstallationResult(const Uptane::EcuSerial& ecu_serial, const data::InstallationResult& result) = 0; virtual bool loadEcuInstallationResults( - std::vector>* results) = 0; + std::vector>* results) const = 0; virtual void storeDeviceInstallationResult(const data::InstallationResult& result, const std::string& raw_report, const std::string& correlation_id) = 0; + virtual bool storeDeviceInstallationRawReport(const std::string& raw_report) = 0; virtual bool loadDeviceInstallationResult(data::InstallationResult* result, std::string* raw_report, - std::string* correlation_id) = 0; + std::string* correlation_id) const = 0; virtual void clearInstallationResults() = 0; - virtual boost::optional> checkTargetFile(const Uptane::Target& target) const = 0; + virtual void saveEcuReportCounter(const Uptane::EcuSerial& ecu_serial, int64_t counter) = 0; + virtual bool loadEcuReportCounter(std::vector>* results) const = 0; - // Incremental file API - virtual std::unique_ptr allocateTargetFile(bool from_director, - const Uptane::Target& target) = 0; + virtual void saveReportEvent(const Json::Value& json_value) = 0; + virtual bool loadReportEvents(Json::Value* report_array, int64_t* id_max, int limit = -1) const = 0; + virtual void deleteReportEvents(int64_t id_max) = 0; - virtual std::unique_ptr openTargetFile(const Uptane::Target& target) = 0; - virtual std::vector getTargetFiles() = 0; - virtual void removeTargetFile(const std::string& target_name) = 0; + virtual void storeDeviceDataHash(const std::string& data_type, const std::string& hash) = 0; + virtual bool loadDeviceDataHash(const std::string& data_type, std::string* hash) const = 0; + virtual void clearDeviceData() = 0; - virtual void cleanUp() = 0; + // Downloaded files info API + 
virtual void storeTargetFilename(const std::string& targetname, const std::string& filename) const = 0; + virtual std::string getTargetFilename(const std::string& targetname) const = 0; + virtual std::vector getAllTargetNames() const = 0; + virtual void deleteTargetInfo(const std::string& targetname) const = 0; // Special constructors and utilities - static std::shared_ptr newStorage(const StorageConfig& config, bool readonly = false); + static std::shared_ptr newStorage(const StorageConfig& config, bool readonly = false, + StorageClient client = StorageClient::kUptane); static void FSSToSQLS(FSStorageRead& fs_storage, SQLStorage& sql_storage); static bool fsReadInstalledVersions(const boost::filesystem::path& filename, std::vector* installed_versions, size_t* current_version); @@ -202,27 +160,40 @@ class INvStorage { // Not purely virtual void importData(const ImportConfig& import_config); bool loadPrimaryInstalledVersions(boost::optional* current_version, - boost::optional* pending_version) { + boost::optional* pending_version) const { return loadInstalledVersions("", current_version, pending_version); } void savePrimaryInstalledVersion(const Uptane::Target& target, InstalledVersionUpdateMode update_mode) { return saveInstalledVersion("", target, update_mode); } - bool loadPrimaryInstallationLog(std::vector* log, bool only_installed) { + bool loadPrimaryInstallationLog(std::vector* log, bool only_installed) const { return loadInstallationLog("", log, only_installed); } + void importInstalledVersions(const boost::filesystem::path& base_path); private: - void importSimple(const boost::filesystem::path& base_path, store_data_t store_func, load_data_t load_func, - const BasedPath& imported_data_path); void importUpdateSimple(const boost::filesystem::path& base_path, store_data_t store_func, load_data_t load_func, - const BasedPath& imported_data_path); - void importPrimaryKeys(const boost::filesystem::path& base_path, const BasedPath& import_pubkey_path, - const 
BasedPath& import_privkey_path); - void importInstalledVersions(const boost::filesystem::path& base_path); + const utils::BasedPath& imported_data_path, const std::string& data_name); + void importUpdateCertificate(const boost::filesystem::path& base_path, const utils::BasedPath& imported_data_path); + void importPrimaryKeys(const boost::filesystem::path& base_path, const utils::BasedPath& import_pubkey_path, + const utils::BasedPath& import_privkey_path); + + /** + * Import initial image and director root.json from the filesystem. + * These would be loaded onto the device during provisioning at a well-known + * location such as /var/sota/import/repo/root.json (image repo) and + * /var/sota/import/director/root.json for the director repo. + * + * @param base_path e.g. '/var/sota/import' + */ + void importInitialRoot(const boost::filesystem::path& base_path); + void importInitialRootFile(const boost::filesystem::path& root_path, Uptane::RepositoryType repo_type); protected: const StorageConfig config_; + + private: + StorageClient client_; }; #endif // INVSTORAGE_H_ diff --git a/src/libaktualizr/storage/schema_migration_test.sh b/src/libaktualizr/storage/schema_migration_test.sh index 979a385b8e..677f0bcfa5 100755 --- a/src/libaktualizr/storage/schema_migration_test.sh +++ b/src/libaktualizr/storage/schema_migration_test.sh @@ -24,7 +24,7 @@ sqlite3 -batch -init "$SQL_DIR/schema.sql" "$DB_CUR" ";" for f in "$SQL_DIR"/migration/migrate.*.sql; do R=$( { sqlite3 -batch -init "$f" "$DB_MIG" ";"; } 2>&1 ) if [ -n "$R" ]; then - echo $R + echo "$R" exit 1 fi done diff --git a/src/libaktualizr/storage/sql_utils.h b/src/libaktualizr/storage/sql_utils.h index afc69c96c5..28a48cce17 100644 --- a/src/libaktualizr/storage/sql_utils.h +++ b/src/libaktualizr/storage/sql_utils.h @@ -4,8 +4,10 @@ #include #include #include +#include +#include -#include +#include #include #include @@ -19,14 +21,14 @@ struct SQLBlob { explicit SQLBlob(const std::string& str) : content(str) {} }; 
-struct SQLZeroBlob { - size_t size; +class SQLException : public std::runtime_error { + public: + explicit SQLException(const std::string& what = "SQL error") : std::runtime_error(what) {} }; -class SQLException : public std::runtime_error { +class SQLInternalException : public SQLException { public: - SQLException(const std::string& what = "SQL error") : std::runtime_error(what) {} - ~SQLException() noexcept override = default; + explicit SQLInternalException(const std::string& what = "SQL internal error") : SQLException(what) {} }; class SQLiteStatement { @@ -38,7 +40,7 @@ class SQLiteStatement { if (sqlite3_prepare_v2(db_, zSql.c_str(), -1, &statement, nullptr) != SQLITE_OK) { LOG_ERROR << "Could not prepare statement: " << sqlite3_errmsg(db_); - throw SQLException(); + throw SQLInternalException(std::string("Could not prepare statement: ") + sqlite3_errmsg(db_)); } stmt_.reset(statement); @@ -50,15 +52,16 @@ class SQLiteStatement { // get results inline boost::optional get_result_col_blob(int iCol) { - auto b = reinterpret_cast(sqlite3_column_blob(stmt_.get(), iCol)); + const auto* b = reinterpret_cast(sqlite3_column_blob(stmt_.get(), iCol)); if (b == nullptr) { return boost::none; } - return std::string(b); + auto length = static_cast(sqlite3_column_bytes(stmt_.get(), iCol)); + return std::string(b, length); } inline boost::optional get_result_col_str(int iCol) { - auto b = reinterpret_cast(sqlite3_column_text(stmt_.get(), iCol)); + const auto* b = reinterpret_cast(sqlite3_column_text(stmt_.get(), iCol)); if (b == nullptr) { return boost::none; } @@ -71,14 +74,14 @@ class SQLiteStatement { void bindArgument(int v) { if (sqlite3_bind_int(stmt_.get(), bind_cnt_, v) != SQLITE_OK) { LOG_ERROR << "Could not bind: " << sqlite3_errmsg(db_); - throw std::runtime_error("SQLite bind error"); + throw SQLInternalException(std::string("SQLite bind error: ") + sqlite3_errmsg(db_)); } } void bindArgument(int64_t v) { if (sqlite3_bind_int64(stmt_.get(), bind_cnt_, v) != 
SQLITE_OK) { LOG_ERROR << "Could not bind: " << sqlite3_errmsg(db_); - throw std::runtime_error("SQLite bind error"); + throw SQLInternalException(std::string("SQLite bind error: ") + sqlite3_errmsg(db_)); } } @@ -88,7 +91,7 @@ class SQLiteStatement { if (sqlite3_bind_text(stmt_.get(), bind_cnt_, oe.c_str(), -1, nullptr) != SQLITE_OK) { LOG_ERROR << "Could not bind: " << sqlite3_errmsg(db_); - throw std::runtime_error("SQLite bind error"); + throw SQLInternalException(std::string("SQLite bind error: ") + sqlite3_errmsg(db_)); } } @@ -101,14 +104,7 @@ class SQLiteStatement { if (sqlite3_bind_blob(stmt_.get(), bind_cnt_, oe.c_str(), static_cast(oe.size()), SQLITE_STATIC) != SQLITE_OK) { LOG_ERROR << "Could not bind: " << sqlite3_errmsg(db_); - throw std::runtime_error("SQLite bind error"); - } - } - - void bindArgument(const SQLZeroBlob& blob) { - if (sqlite3_bind_zeroblob(stmt_.get(), bind_cnt_, static_cast(blob.size)) != SQLITE_OK) { - LOG_ERROR << "Could not bind: " << sqlite3_errmsg(db_); - throw std::runtime_error("SQLite bind error"); + throw SQLInternalException("SQLite bind error"); } } @@ -131,15 +127,19 @@ class SQLiteStatement { }; // Unique ownership SQLite3 connection -extern std::mutex sql_mutex; +const extern std::mutex sql_mutex; class SQLite3Guard { public: sqlite3* get() { return handle_.get(); } - int get_rc() { return rc_; } + int get_rc() const { return rc_; } - explicit SQLite3Guard(const char* path, bool readonly) : handle_(nullptr, sqlite3_close), rc_(0) { + explicit SQLite3Guard(const char* path, bool readonly, std::shared_ptr mutex = nullptr) + : handle_(nullptr, sqlite3_close), rc_(0), m_(std::move(mutex)) { + if (m_) { + m_->lock(); + } if (sqlite3_threadsafe() == 0) { - throw std::runtime_error("sqlite3 has been compiled without multitheading support"); + throw SQLInternalException("sqlite3 has been compiled without multitheading support"); } sqlite3* h; if (readonly) { @@ -147,14 +147,25 @@ class SQLite3Guard { } else { rc_ = 
sqlite3_open_v2(path, &h, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX, nullptr); } + + /* retry operations for 2 seconds before returning SQLITE_BUSY */ + sqlite3_busy_timeout(h, 2000); + handle_.reset(h); } - explicit SQLite3Guard(const boost::filesystem::path& path, bool readonly = false) - : SQLite3Guard(path.c_str(), readonly) {} + explicit SQLite3Guard(const boost::filesystem::path& path, bool readonly = false, + std::shared_ptr mutex = nullptr) + : SQLite3Guard(path.c_str(), readonly, std::move(mutex)) {} SQLite3Guard(SQLite3Guard&& guard) noexcept : handle_(std::move(guard.handle_)), rc_(guard.rc_) {} + ~SQLite3Guard() { + if (m_) { + m_->unlock(); + } + } SQLite3Guard(const SQLite3Guard& guard) = delete; - SQLite3Guard operator=(const SQLite3Guard& guard) = delete; + SQLite3Guard& operator=(const SQLite3Guard& guard) = delete; + SQLite3Guard& operator=(SQLite3Guard&&) = delete; int exec(const char* sql, int (*callback)(void*, int, char**, char**), void* cb_arg) { return sqlite3_exec(handle_.get(), sql, callback, cb_arg, nullptr); @@ -179,35 +190,33 @@ class SQLite3Guard { // if `rollbackTransaction()` is called explicitely, the changes will be // rolled back - bool beginTransaction() { + void beginTransaction() { // Note: transaction cannot be nested and this will fail if another // transaction was open on the same connection - int ret = exec("BEGIN TRANSACTION;", nullptr, nullptr); - if (ret != SQLITE_OK) { + if (exec("BEGIN TRANSACTION;", nullptr, nullptr) != SQLITE_OK) { LOG_ERROR << "Can't begin transaction: " << errmsg(); + throw SQLInternalException(std::string("Can't begin transaction: ") + errmsg()); } - return ret == SQLITE_OK; } - bool commitTransaction() { - int ret = exec("COMMIT TRANSACTION;", nullptr, nullptr); - if (ret != SQLITE_OK) { + void commitTransaction() { + if (exec("COMMIT TRANSACTION;", nullptr, nullptr) != SQLITE_OK) { LOG_ERROR << "Can't commit transaction: " << errmsg(); + throw 
SQLInternalException(std::string("Can't begin transaction: ") + errmsg()); } - return ret == SQLITE_OK; } - bool rollbackTransaction() { - int ret = exec("ROLLBACK TRANSACTION;", nullptr, nullptr); - if (ret != SQLITE_OK) { + void rollbackTransaction() { + if (exec("ROLLBACK TRANSACTION;", nullptr, nullptr) != SQLITE_OK) { LOG_ERROR << "Can't rollback transaction: " << errmsg(); + throw SQLInternalException(std::string("Can't begin transaction: ") + errmsg()); } - return ret == SQLITE_OK; } private: std::unique_ptr handle_; int rc_; + std::shared_ptr m_ = nullptr; }; #endif // SQL_UTILS_H_ diff --git a/src/libaktualizr/storage/sqlstorage.cc b/src/libaktualizr/storage/sqlstorage.cc index b391948e02..3da82d8020 100644 --- a/src/libaktualizr/storage/sqlstorage.cc +++ b/src/libaktualizr/storage/sqlstorage.cc @@ -11,14 +11,11 @@ #include "sql_utils.h" #include "utilities/utils.h" -// find metadata with version set to -1 (e.g. after migration) and assign proper version to it +// Find metadata with version set to -1 (e.g. after migration) and assign proper version to it. void SQLStorage::cleanMetaVersion(Uptane::RepositoryType repo, const Uptane::Role& role) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); auto statement = db.prepareStatement( "SELECT meta FROM meta WHERE (repo=? AND meta_type=? AND version=?);", static_cast(repo), role.ToInt(), -1); @@ -26,26 +23,26 @@ void SQLStorage::cleanMetaVersion(Uptane::RepositoryType repo, const Uptane::Rol int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "meta with role " << role.ToString() << " in repo " << repo.toString() << " not present in db"; + // Nothing to do here. The log message that used to be here was confusing. 
return; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get meta: " << db.errmsg(); + LOG_ERROR << "Failed to get " << repo << " " << role << " metadata: " << db.errmsg(); return; } - std::string meta = std::string(reinterpret_cast(sqlite3_column_blob(statement.get(), 0))); + const std::string meta = std::string(reinterpret_cast(sqlite3_column_blob(statement.get(), 0))); - int version = Uptane::extractVersionUntrusted(meta); + const int version = Uptane::extractVersionUntrusted(meta); if (version < 0) { - LOG_ERROR << "Corrupted metadata"; + LOG_ERROR << "Corrupted " << repo << " " << role << " metadata."; return; } - // in there is already metadata with such version delete it + // If there is already metadata with the same version, delete it. statement = db.prepareStatement("DELETE FROM meta WHERE (repo=? AND meta_type=? AND version=?);", static_cast(repo), role.ToInt(), version); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't clear metadata: " << db.errmsg(); + LOG_ERROR << "Failed to clear " << repo << " " << role << " metadata: " << db.errmsg(); return; } @@ -54,18 +51,18 @@ void SQLStorage::cleanMetaVersion(Uptane::RepositoryType repo, const Uptane::Rol role.ToInt(), -1); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't update metadata: " << db.errmsg(); + LOG_ERROR << "Failed to update " << repo << " " << role << " metadata: " << db.errmsg(); return; } db.commitTransaction(); } -SQLStorage::SQLStorage(const StorageConfig& config, bool readonly) +SQLStorage::SQLStorage(const StorageConfig& config, bool readonly, StorageClient storage_client) : SQLStorageBase(config.sqldb_path.get(config.path), readonly, libaktualizr_schema_migrations, libaktualizr_schema_rollback_migrations, libaktualizr_current_schema, libaktualizr_current_schema_version), - INvStorage(config) { + INvStorage(config, storage_client) { try { cleanMetaVersion(Uptane::RepositoryType::Director(), Uptane::Role::Root()); 
cleanMetaVersion(Uptane::RepositoryType::Image(), Uptane::Role::Root()); @@ -80,26 +77,26 @@ void SQLStorage::storePrimaryKeys(const std::string& public_key, const std::stri auto statement = db.prepareStatement( "INSERT OR REPLACE INTO primary_keys(unique_mark,public,private) VALUES (0,?,?);", public_key, private_key); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set primary keys: " << db.errmsg(); + LOG_ERROR << "Failed to set Primary keys: " << db.errmsg(); return; } } -bool SQLStorage::loadPrimaryKeys(std::string* public_key, std::string* private_key) { +bool SQLStorage::loadPrimaryKeys(std::string* public_key, std::string* private_key) const { return loadPrimaryPublic(public_key) && loadPrimaryPrivate(private_key); } -bool SQLStorage::loadPrimaryPublic(std::string* public_key) { +bool SQLStorage::loadPrimaryPublic(std::string* public_key) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT public FROM primary_keys LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "No public key in db"; + LOG_TRACE << "Uptane public key not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get public key: " << db.errmsg(); + LOG_ERROR << "Failed to get Uptane public key: " << db.errmsg(); return false; } @@ -115,17 +112,17 @@ bool SQLStorage::loadPrimaryPublic(std::string* public_key) { return true; } -bool SQLStorage::loadPrimaryPrivate(std::string* private_key) { +bool SQLStorage::loadPrimaryPrivate(std::string* private_key) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT private FROM primary_keys LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "No private key in db"; + LOG_TRACE << "Uptane private key not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get private key: " << db.errmsg(); + LOG_ERROR << "Failed to get Uptane private 
key: " << db.errmsg(); return false; } @@ -145,11 +142,158 @@ void SQLStorage::clearPrimaryKeys() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM primary_keys;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear primary keys: " << db.errmsg(); + LOG_ERROR << "Failed to clear Primary keys: " << db.errmsg(); return; } } +void SQLStorage::saveSecondaryInfo(const Uptane::EcuSerial& ecu_serial, const std::string& sec_type, + const PublicKey& public_key) { + SQLite3Guard db = dbConnection(); + + std::stringstream key_type_ss; + key_type_ss << public_key.Type(); + std::string key_type_str; + key_type_str = key_type_ss.str(); + key_type_str.erase(std::remove(key_type_str.begin(), key_type_str.end(), '"'), key_type_str.end()); + + db.beginTransaction(); + + auto statement = + db.prepareStatement("SELECT count(*) FROM secondary_ecus WHERE serial = ?;", ecu_serial.ToString()); + if (statement.step() != SQLITE_ROW) { + throw SQLException(db.errmsg().insert(0, "Failed to get count of secondary_ecus table: ")); + } + + const char* req; + if (statement.get_result_col_int(0) != 0) { + req = "UPDATE secondary_ecus SET sec_type = ?, public_key_type = ?, public_key = ? WHERE serial = ?;"; + } else { + req = + "INSERT INTO secondary_ecus (serial, sec_type, public_key_type, public_key) SELECT " + "serial,?,?,? FROM ecus WHERE (serial = ? 
AND is_primary = 0);"; + } + + statement = db.prepareStatement( + req, sec_type, key_type_str, public_key.Value(), ecu_serial.ToString()); + if (statement.step() != SQLITE_DONE || sqlite3_changes(db.get()) != 1) { + throw SQLException(db.errmsg().insert(0, "Failed to set Secondary key: ")); + } + + db.commitTransaction(); +} + +void SQLStorage::saveSecondaryData(const Uptane::EcuSerial& ecu_serial, const std::string& data) { + SQLite3Guard db = dbConnection(); + + db.beginTransaction(); + + auto statement = + db.prepareStatement("SELECT count(*) FROM secondary_ecus WHERE serial = ?;", ecu_serial.ToString()); + if (statement.step() != SQLITE_ROW) { + throw SQLException(db.errmsg().insert(0, "Failed to get count of secondary_ecus table: ")); + } + + const char* req; + if (statement.get_result_col_int(0) != 0) { + req = "UPDATE secondary_ecus SET extra = ? WHERE serial = ?;"; + } else { + req = "INSERT INTO secondary_ecus (extra, serial) VALUES (?,?);"; + } + + statement = db.prepareStatement(req, data, ecu_serial.ToString()); + if (statement.step() != SQLITE_DONE || sqlite3_changes(db.get()) != 1) { + throw SQLException(db.errmsg().insert(0, "Failed to set Secondary data: ")); + } + + db.commitTransaction(); +} + +bool SQLStorage::loadSecondaryInfo(const Uptane::EcuSerial& ecu_serial, SecondaryInfo* secondary) const { + SQLite3Guard db = dbConnection(); + + SecondaryInfo new_sec{}; + + auto statement = db.prepareStatement( + "SELECT serial, hardware_id, sec_type, public_key_type, public_key, extra FROM ecus LEFT JOIN secondary_ecus " + "USING " + "(serial) WHERE (serial = ? 
AND is_primary = 0);", + ecu_serial.ToString()); + int statement_state = statement.step(); + if (statement_state == SQLITE_DONE) { + LOG_TRACE << "Secondary ECU " << ecu_serial << " not found in database"; + return false; + } else if (statement_state != SQLITE_ROW) { + LOG_ERROR << "Failed to load Secondary info: " << db.errmsg(); + return false; + } + + try { + Uptane::EcuSerial serial = Uptane::EcuSerial(statement.get_result_col_str(0).value()); + Uptane::HardwareIdentifier hw_id = Uptane::HardwareIdentifier(statement.get_result_col_str(1).value()); + std::string sec_type = statement.get_result_col_str(2).value_or(""); + std::string kt_str = statement.get_result_col_str(3).value_or(""); + PublicKey key; + if (!kt_str.empty()) { + KeyType key_type; + std::stringstream(kt_str) >> key_type; + key = PublicKey(statement.get_result_col_str(4).value_or(""), key_type); + } + std::string extra = statement.get_result_col_str(5).value_or(""); + new_sec = SecondaryInfo{serial, hw_id, sec_type, key, extra}; + } catch (const boost::bad_optional_access&) { + return false; + } + + if (secondary != nullptr) { + *secondary = std::move(new_sec); + } + + return true; +} + +bool SQLStorage::loadSecondariesInfo(std::vector* secondaries) const { + SQLite3Guard db = dbConnection(); + + std::vector new_secs; + + bool empty = true; + + int statement_state; + auto statement = db.prepareStatement( + "SELECT serial, hardware_id, sec_type, public_key_type, public_key, extra FROM ecus LEFT JOIN secondary_ecus " + "USING " + "(serial) WHERE is_primary = 0 ORDER BY ecus.id;"); + while ((statement_state = statement.step()) == SQLITE_ROW) { + try { + Uptane::EcuSerial serial = Uptane::EcuSerial(statement.get_result_col_str(0).value()); + Uptane::HardwareIdentifier hw_id = Uptane::HardwareIdentifier(statement.get_result_col_str(1).value()); + std::string sec_type = statement.get_result_col_str(2).value_or(""); + std::string kt_str = statement.get_result_col_str(3).value_or(""); + PublicKey key; + 
if (!kt_str.empty()) { + KeyType key_type; + std::stringstream(kt_str) >> key_type; + key = PublicKey(statement.get_result_col_str(4).value_or(""), key_type); + } + std::string extra = statement.get_result_col_str(5).value_or(""); + new_secs.emplace_back(SecondaryInfo{serial, hw_id, sec_type, key, extra}); + empty = false; + } catch (const boost::bad_optional_access&) { + continue; + } + } + if (statement_state != SQLITE_DONE) { + LOG_ERROR << "Failed to load Secondary info" << db.errmsg(); + } + + if (secondaries != nullptr) { + *secondaries = std::move(new_secs); + } + + return !empty; +} + void SQLStorage::storeTlsCreds(const std::string& ca, const std::string& cert, const std::string& pkey) { storeTlsCa(ca); storeTlsCert(cert); @@ -159,14 +303,11 @@ void SQLStorage::storeTlsCreds(const std::string& ca, const std::string& cert, c void SQLStorage::storeTlsCa(const std::string& ca) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); auto statement = db.prepareStatement("SELECT count(*) FROM tls_creds;"); if (statement.step() != SQLITE_ROW) { - LOG_ERROR << "Can't get count of tls_creds table: " << db.errmsg(); + LOG_ERROR << "Failed to get count of tls_creds table: " << db.errmsg(); return; } @@ -179,7 +320,7 @@ void SQLStorage::storeTlsCa(const std::string& ca) { statement = db.prepareStatement(req, SQLBlob(ca)); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set ca_cert: " << db.errmsg(); + LOG_ERROR << "Failed to set CA certificate: " << db.errmsg(); return; } @@ -189,14 +330,11 @@ void SQLStorage::storeTlsCa(const std::string& ca) { void SQLStorage::storeTlsCert(const std::string& cert) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); auto statement = db.prepareStatement("SELECT count(*) FROM tls_creds;"); if 
(statement.step() != SQLITE_ROW) { - LOG_ERROR << "Can't get count of tls_creds table: " << db.errmsg(); + LOG_ERROR << "Failed to get count of tls_creds table: " << db.errmsg(); return; } @@ -209,7 +347,7 @@ void SQLStorage::storeTlsCert(const std::string& cert) { statement = db.prepareStatement(req, SQLBlob(cert)); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set client_cert: " << db.errmsg(); + LOG_ERROR << "Failed to set client certificate: " << db.errmsg(); return; } @@ -219,13 +357,11 @@ void SQLStorage::storeTlsCert(const std::string& cert) { void SQLStorage::storeTlsPkey(const std::string& pkey) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); + auto statement = db.prepareStatement("SELECT count(*) FROM tls_creds;"); if (statement.step() != SQLITE_ROW) { - LOG_ERROR << "Can't get count of tls_creds table: " << db.errmsg(); + LOG_ERROR << "Failed to get count of tls_creds table: " << db.errmsg(); return; } @@ -238,32 +374,30 @@ void SQLStorage::storeTlsPkey(const std::string& pkey) { statement = db.prepareStatement(req, SQLBlob(pkey)); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set client_pkey: " << db.errmsg(); + LOG_ERROR << "Failed to set client private key: " << db.errmsg(); return; } db.commitTransaction(); } -bool SQLStorage::loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) { +bool SQLStorage::loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) const { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return false; - } auto statement = db.prepareStatement("SELECT ca_cert, client_cert, client_pkey FROM tls_creds LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "Tls creds not present"; + LOG_TRACE << "TLS credentials not found in database"; return false; } 
else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get tls_creds: " << db.errmsg(); + LOG_ERROR << "Failed to get TLS credentials: " << db.errmsg(); return false; } - std::string ca_v, cert_v, pkey_v; + std::string ca_v; + std::string cert_v; + std::string pkey_v; try { ca_v = statement.get_result_col_str(0).value(); cert_v = statement.get_result_col_str(1).value(); @@ -282,8 +416,6 @@ bool SQLStorage::loadTlsCreds(std::string* ca, std::string* cert, std::string* p *pkey = std::move(pkey_v); } - db.commitTransaction(); - return true; } @@ -291,22 +423,22 @@ void SQLStorage::clearTlsCreds() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM tls_creds;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear tls_creds: " << db.errmsg(); + LOG_ERROR << "Failed to clear TLS credentials: " << db.errmsg(); return; } } -bool SQLStorage::loadTlsCa(std::string* ca) { +bool SQLStorage::loadTlsCa(std::string* ca) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT ca_cert FROM tls_creds LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "ca_cert not present"; + LOG_TRACE << "CA certificate not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get ca_cert: " << db.errmsg(); + LOG_ERROR << "Failed to get CA certificate: " << db.errmsg(); return false; } @@ -322,17 +454,17 @@ bool SQLStorage::loadTlsCa(std::string* ca) { return true; } -bool SQLStorage::loadTlsCert(std::string* cert) { +bool SQLStorage::loadTlsCert(std::string* cert) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT client_cert FROM tls_creds LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "client_cert not present in db"; + LOG_TRACE << "Client certificate not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get client_cert: " << db.errmsg(); + LOG_ERROR << "Failed 
to get client certificate: " << db.errmsg(); return false; } @@ -348,17 +480,17 @@ bool SQLStorage::loadTlsCert(std::string* cert) { return true; } -bool SQLStorage::loadTlsPkey(std::string* pkey) { +bool SQLStorage::loadTlsPkey(std::string* pkey) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT client_pkey FROM tls_creds LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "client_pkey not present in db"; + LOG_TRACE << "Client private key not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get client_pkey: " << db.errmsg(); + LOG_ERROR << "Failed to get client private key: " << db.errmsg(); return false; } @@ -377,17 +509,14 @@ bool SQLStorage::loadTlsPkey(std::string* pkey) { void SQLStorage::storeRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Version version) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); auto del_statement = db.prepareStatement("DELETE FROM meta WHERE (repo=? AND meta_type=? 
AND version=?);", static_cast(repo), Uptane::Role::Root().ToInt(), version.version()); if (del_statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't clear root metadata: " << db.errmsg(); + LOG_ERROR << "Failed to clear Root metadata: " << db.errmsg(); return; } @@ -396,7 +525,7 @@ void SQLStorage::storeRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Role::Root().ToInt(), version.version()); if (ins_statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't add metadata: " << db.errmsg(); + LOG_ERROR << "Failed to store Root metadata: " << db.errmsg(); return; } @@ -406,16 +535,14 @@ void SQLStorage::storeRoot(const std::string& data, Uptane::RepositoryType repo, void SQLStorage::storeNonRoot(const std::string& data, Uptane::RepositoryType repo, const Uptane::Role role) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + LOG_DEBUG << "Storing " << role << " for " << repo << " repo in SQL storage"; + db.beginTransaction(); auto del_statement = db.prepareStatement("DELETE FROM meta WHERE (repo=? 
AND meta_type=?);", static_cast(repo), role.ToInt()); if (del_statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't clear metadata: " << db.errmsg(); + LOG_ERROR << "Failed to clear " << role << " metadata: " << db.errmsg(); return; } @@ -424,14 +551,14 @@ void SQLStorage::storeNonRoot(const std::string& data, Uptane::RepositoryType re static_cast(repo), role.ToInt(), Uptane::Version().version()); if (ins_statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't add metadata: " << db.errmsg(); + LOG_ERROR << "Failed to add " << role << "metadata: " << db.errmsg(); return; } db.commitTransaction(); } -bool SQLStorage::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) { +bool SQLStorage::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) const { SQLite3Guard db = dbConnection(); // version < 0 => latest metadata requested @@ -442,10 +569,10 @@ bool SQLStorage::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "Meta not present"; + LOG_TRACE << "Root metadata not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get meta: " << db.errmsg(); + LOG_ERROR << "Failed to get Root metadata: " << db.errmsg(); return false; } if (data != nullptr) { @@ -459,16 +586,16 @@ bool SQLStorage::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "Meta not present"; + LOG_TRACE << "Root metadata not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get meta: " << db.errmsg(); + LOG_ERROR << "Failed to get Root metadata: " << db.errmsg(); return false; } - const auto blob = reinterpret_cast(sqlite3_column_blob(statement.get(), 0)); + const auto* const blob = reinterpret_cast(sqlite3_column_blob(statement.get(), 0)); if (blob == nullptr) { - LOG_ERROR << "Can't get meta: 
" << db.errmsg(); + LOG_ERROR << "Failed to get Root metadata: " << db.errmsg(); return false; } @@ -480,7 +607,7 @@ bool SQLStorage::loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane return true; } -bool SQLStorage::loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role role) { +bool SQLStorage::loadNonRoot(std::string* data, Uptane::RepositoryType repo, const Uptane::Role role) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement( @@ -489,10 +616,10 @@ bool SQLStorage::loadNonRoot(std::string* data, Uptane::RepositoryType repo, con int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "Meta not present"; + LOG_TRACE << role << " metadata not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get meta: " << db.errmsg(); + LOG_ERROR << "Failed to get " << role << " metadata: " << db.errmsg(); return false; } if (data != nullptr) { @@ -509,7 +636,7 @@ void SQLStorage::clearNonRootMeta(Uptane::RepositoryType repo) { db.prepareStatement("DELETE FROM meta WHERE (repo=? 
AND meta_type != 0);", static_cast(repo)); if (del_statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't clear metadata: " << db.errmsg(); + LOG_ERROR << "Failed to clear metadata: " << db.errmsg(); } } @@ -517,7 +644,7 @@ void SQLStorage::clearMetadata() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM meta;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear metadata: " << db.errmsg(); + LOG_ERROR << "Failed to clear metadata: " << db.errmsg(); return; } } @@ -525,23 +652,15 @@ void SQLStorage::clearMetadata() { void SQLStorage::storeDelegation(const std::string& data, const Uptane::Role role) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } - auto statement = db.prepareStatement("INSERT OR REPLACE INTO delegations VALUES (?, ?);", SQLBlob(data), role.ToString()); - if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't add delegation metadata: " << db.errmsg(); + LOG_ERROR << "Failed to store delegation metadata: " << db.errmsg(); return; } - - db.commitTransaction(); } -bool SQLStorage::loadDelegation(std::string* data, const Uptane::Role role) { +bool SQLStorage::loadDelegation(std::string* data, const Uptane::Role role) const { SQLite3Guard db = dbConnection(); auto statement = @@ -549,10 +668,10 @@ bool SQLStorage::loadDelegation(std::string* data, const Uptane::Role role) { int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "Delegations metadata not present"; + LOG_TRACE << "Delegations metadata not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get delegations metadata: " << db.errmsg(); + LOG_ERROR << "Failed to get delegations metadata: " << db.errmsg(); return false; } if (data != nullptr) { @@ -572,10 +691,10 @@ bool SQLStorage::loadAllDelegations(std::vector( "INSERT OR REPLACE INTO device_info(unique_mark,device_id,is_registered) VALUES(0,?,0);", device_id); if 
(statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set device ID: " << db.errmsg(); + LOG_ERROR << "Failed to set device ID: " << db.errmsg(); return; } } -bool SQLStorage::loadDeviceId(std::string* device_id) { +bool SQLStorage::loadDeviceId(std::string* device_id) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT device_id FROM device_info LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { - LOG_TRACE << "device_id not present in db"; + LOG_TRACE << "Device ID key not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get device ID: " << db.errmsg(); + LOG_ERROR << "Failed to get device ID: " << db.errmsg(); return false; } auto did = statement.get_result_col_str(0); if (did == boost::none) { - LOG_ERROR << "Empty device ID" << db.errmsg(); + LOG_ERROR << "Empty device ID: " << db.errmsg(); return false; } @@ -649,7 +773,7 @@ void SQLStorage::clearDeviceId() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM device_info;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear device ID: " << db.errmsg(); + LOG_ERROR << "Failed to clear device ID: " << db.errmsg(); return; } } @@ -657,38 +781,36 @@ void SQLStorage::clearDeviceId() { void SQLStorage::storeEcuRegistered() { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); auto statement = db.prepareStatement("SELECT count(*) FROM device_info;"); if (statement.step() != SQLITE_ROW) { - throw std::runtime_error("Could not get device_info count"); + throw SQLException(std::string("Failed to get device_info count: ") + db.errmsg()); } if (statement.get_result_col_int(0) != 1) { - throw std::runtime_error("Cannot set ecu registered if no device_info set"); + throw SQLException("Failed to set ECU registered because device info is empty."); } std::string req = "UPDATE device_info SET 
is_registered = 1"; if (db.exec(req.c_str(), nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't set is_registered: " << db.errmsg(); + LOG_ERROR << "Failed to set registration flag: " << db.errmsg(); return; } db.commitTransaction(); } -bool SQLStorage::loadEcuRegistered() { +bool SQLStorage::loadEcuRegistered() const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT is_registered FROM device_info LIMIT 1;"); int result = statement.step(); if (result == SQLITE_DONE) { + LOG_TRACE << "Registration flag not found in database"; return false; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get is_registered in device_info " << db.errmsg(); + LOG_ERROR << "Failed to get registration flag: " << db.errmsg(); return false; } @@ -701,7 +823,7 @@ void SQLStorage::clearEcuRegistered() { // note: if the table is empty, nothing is done but that's fine std::string req = "UPDATE device_info SET is_registered = 0"; if (db.exec(req.c_str(), nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't set is_registered: " << db.errmsg(); + LOG_ERROR << "Failed to clear registration flag: " << db.errmsg(); return; } } @@ -711,12 +833,12 @@ void SQLStorage::storeNeedReboot() { auto statement = db.prepareStatement("INSERT OR REPLACE INTO need_reboot(unique_mark,flag) VALUES(0,?);", 1); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set need_reboot: " << db.errmsg(); + LOG_ERROR << "Failed to set reboot flag: " << db.errmsg(); return; } } -bool SQLStorage::loadNeedReboot(bool* need_reboot) { +bool SQLStorage::loadNeedReboot(bool* need_reboot) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT flag FROM need_reboot LIMIT 1;"); @@ -728,7 +850,7 @@ bool SQLStorage::loadNeedReboot(bool* need_reboot) { } return true; } else if (result != SQLITE_ROW) { - LOG_ERROR << "Can't get need_reboot: " << db.errmsg(); + LOG_ERROR << "Failed to get reboot flag: " << db.errmsg(); return false; } @@ -744,33 +866,30 
@@ void SQLStorage::clearNeedReboot() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM need_reboot;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear need_reboot: " << db.errmsg(); + LOG_ERROR << "Failed to clear reboot flag: " << db.errmsg(); return; } } void SQLStorage::storeEcuSerials(const EcuSerials& serials) { - if (serials.size() >= 1) { + if (!serials.empty()) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); - if (db.exec("DELETE FROM ecu_serials;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear ecu_serials: " << db.errmsg(); + if (db.exec("DELETE FROM ecus;", nullptr, nullptr) != SQLITE_OK) { + LOG_ERROR << "Failed to clear ECU serials: " << db.errmsg(); return; } - // first is the primary + // first is the Primary std::string serial = serials[0].first.ToString(); std::string hwid = serials[0].second.ToString(); { auto statement = db.prepareStatement( - "INSERT INTO ecu_serials(id, serial,hardware_id,is_primary) VALUES (0, ?,?,1);", serial, hwid); + "INSERT INTO ecus(id, serial,hardware_id,is_primary) VALUES (0, ?,?,1);", serial, hwid); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set ecu_serial: " << db.errmsg(); + LOG_ERROR << "Failed to store ECU serials: " << db.errmsg(); return; } @@ -779,18 +898,18 @@ void SQLStorage::storeEcuSerials(const EcuSerials& serials) { "UPDATE installed_versions SET ecu_serial = ? 
WHERE ecu_serial = '';", serial); if (statement_ivupdate.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set ecu_serial: " << db.errmsg(); + LOG_ERROR << "Failed to store ECU serials: " << db.errmsg(); return; } } for (auto it = serials.cbegin() + 1; it != serials.cend(); it++) { auto statement = db.prepareStatement( - "INSERT INTO ecu_serials(id,serial,hardware_id) VALUES (?,?,?);", it - serials.cbegin(), it->first.ToString(), + "INSERT INTO ecus(id,serial,hardware_id) VALUES (?,?,?);", it - serials.cbegin(), it->first.ToString(), it->second.ToString()); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set ecu_serial: " << db.errmsg(); + LOG_ERROR << "Failed to store ECU serials: " << db.errmsg(); return; } } @@ -799,11 +918,11 @@ void SQLStorage::storeEcuSerials(const EcuSerials& serials) { } } -bool SQLStorage::loadEcuSerials(EcuSerials* serials) { +bool SQLStorage::loadEcuSerials(EcuSerials* serials) const { SQLite3Guard db = dbConnection(); - // order by auto-incremented primary key so that the ecu order is kept constant - auto statement = db.prepareStatement("SELECT serial, hardware_id FROM ecu_serials ORDER BY id;"); + // order by auto-incremented Primary key so that the ECU order is kept constant + auto statement = db.prepareStatement("SELECT serial, hardware_id FROM ecus ORDER BY id;"); int statement_state; EcuSerials new_serials; @@ -819,7 +938,7 @@ bool SQLStorage::loadEcuSerials(EcuSerials* serials) { } if (statement_state != SQLITE_DONE) { - LOG_ERROR << "Can't get ecu_serials: " << db.errmsg(); + LOG_ERROR << "Failed to get ECU serials: " << db.errmsg(); return false; } @@ -833,43 +952,70 @@ bool SQLStorage::loadEcuSerials(EcuSerials* serials) { void SQLStorage::clearEcuSerials() { SQLite3Guard db = dbConnection(); - if (db.exec("DELETE FROM ecu_serials;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear ecu_serials: " << db.errmsg(); + db.beginTransaction(); + + if (db.exec("DELETE FROM ecus;", nullptr, nullptr) != SQLITE_OK) 
{ + LOG_ERROR << "Failed to clear ECU serials: " << db.errmsg(); + return; + } + + if (db.exec("DELETE FROM secondary_ecus;", nullptr, nullptr) != SQLITE_OK) { + LOG_ERROR << "Failed to clear Secondary ECUs: " << db.errmsg(); return; } + + db.commitTransaction(); } -void SQLStorage::storeMisconfiguredEcus(const std::vector& ecus) { - if (ecus.size() >= 1) { - SQLite3Guard db = dbConnection(); +void SQLStorage::storeCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, const std::string& manifest) { + SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + auto statement = db.prepareStatement( + "UPDATE secondary_ecus SET manifest = ? WHERE (serial = ?);", manifest, ecu_serial.ToString()); + if (statement.step() != SQLITE_DONE || sqlite3_changes(db.get()) != 1) { + LOG_ERROR << "Failed to store Secondary manifest: " << db.errmsg(); + return; + } +} - if (db.exec("DELETE FROM misconfigured_ecus;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear misconfigured_ecus: " << db.errmsg(); - return; - } +bool SQLStorage::loadCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, std::string* manifest) const { + SQLite3Guard db = dbConnection(); - std::vector::const_iterator it; - for (it = ecus.begin(); it != ecus.end(); it++) { - auto statement = db.prepareStatement( - "INSERT INTO misconfigured_ecus VALUES (?,?,?);", it->serial.ToString(), it->hardware_id.ToString(), - static_cast(it->state)); + std::string stmanifest; - if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set misconfigured_ecus: " << db.errmsg(); - return; - } - } + bool empty = false; - db.commitTransaction(); + auto statement = db.prepareStatement("SELECT manifest FROM secondary_ecus WHERE (serial = ?);", + ecu_serial.ToString()); + + if (statement.step() != SQLITE_ROW) { + LOG_WARNING << "Could not find manifest for ECU " << ecu_serial; + return false; + } else { + stmanifest = 
statement.get_result_col_str(0).value_or(""); + + empty = stmanifest.empty(); + } + + if (manifest != nullptr) { + *manifest = std::move(stmanifest); + } + + return !empty; +} + +void SQLStorage::saveMisconfiguredEcu(const MisconfiguredEcu& ecu) { + SQLite3Guard db = dbConnection(); + + auto statement = db.prepareStatement( + "INSERT OR REPLACE INTO misconfigured_ecus VALUES (?,?,?);", ecu.serial.ToString(), ecu.hardware_id.ToString(), + static_cast(ecu.state)); + if (statement.step() != SQLITE_DONE) { + throw SQLException(db.errmsg().insert(0, "Failed to set misconfigured ECUs: ")); } } -bool SQLStorage::loadMisconfiguredEcus(std::vector* ecus) { +bool SQLStorage::loadMisconfiguredEcus(std::vector* ecus) const { SQLite3Guard db = dbConnection(); auto statement = db.prepareStatement("SELECT serial, hardware_id, state FROM misconfigured_ecus;"); @@ -889,7 +1035,7 @@ bool SQLStorage::loadMisconfiguredEcus(std::vector* ecus) { } if (statement_state != SQLITE_DONE) { - LOG_ERROR << "Can't get misconfigured_ecus: " << db.errmsg(); + LOG_ERROR << "Failed to get misconfigured ECUs: " << db.errmsg(); return false; } @@ -904,7 +1050,7 @@ void SQLStorage::clearMisconfiguredEcus() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM misconfigured_ecus;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear misconfigured_ecus: " << db.errmsg(); + LOG_ERROR << "Failed to clear misconfigured ECUs: " << db.errmsg(); return; } } @@ -913,25 +1059,22 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan InstalledVersionUpdateMode update_mode) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } + db.beginTransaction(); // either adds a new entry or update the last one's status - // empty serial: use primary + // empty serial: use Primary std::string ecu_serial_real = ecu_serial; if (ecu_serial_real.empty()) { - auto statement = 
db.prepareStatement("SELECT serial FROM ecu_serials WHERE is_primary = 1;"); + auto statement = db.prepareStatement("SELECT serial FROM ecus WHERE is_primary = 1;"); if (statement.step() == SQLITE_ROW) { ecu_serial_real = statement.get_result_col_str(0).value(); } else { - LOG_WARNING << "Could not find primary ecu serial, set to lazy init mode"; + LOG_WARNING << "Could not find Primary ECU serial, set to lazy init mode"; } } - std::string hashes_encoded = Uptane::Hash::encodeVector(target.hashes()); + std::string hashes_encoded = Hash::encodeVector(target.hashes()); // get the last time this version was installed on this ecu boost::optional old_id; @@ -960,7 +1103,7 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan auto statement = db.prepareStatement( "UPDATE installed_versions SET is_current = 0, is_pending = 0 WHERE ecu_serial = ?", ecu_serial_real); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to save installed versions: " << db.errmsg(); return; } } else if (update_mode == InstalledVersionUpdateMode::kPending) { @@ -968,7 +1111,7 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan auto statement = db.prepareStatement( "UPDATE installed_versions SET is_pending = 0 WHERE ecu_serial = ?", ecu_serial_real); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to save installed versions: " << db.errmsg(); return; } } @@ -982,11 +1125,11 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan static_cast(update_mode == InstalledVersionUpdateMode::kCurrent || old_was_installed), old_id.value()); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to save installed versions: " << db.errmsg(); return; } } else { - std::string custom = 
Json::FastWriter().write(target.custom_data()); + std::string custom = Utils::jsonToCanonicalStr(target.custom_data()); auto statement = db.prepareStatement( "INSERT INTO installed_versions(ecu_serial, sha256, name, hashes, length, custom_meta, correlation_id, " @@ -997,7 +1140,7 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan static_cast(update_mode == InstalledVersionUpdateMode::kCurrent)); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to save installed versions: " << db.errmsg(); return; } } @@ -1006,29 +1149,34 @@ void SQLStorage::saveInstalledVersion(const std::string& ecu_serial, const Uptan } static void loadEcuMap(SQLite3Guard& db, std::string& ecu_serial, Uptane::EcuMap& ecu_map) { + // The Secondary only knows about itself and in its database it is considered + // a Primary, for better or worse. if (ecu_serial.empty()) { - auto statement = db.prepareStatement("SELECT serial FROM ecu_serials WHERE is_primary = 1;"); + auto statement = db.prepareStatement("SELECT serial FROM ecus WHERE is_primary = 1;"); if (statement.step() == SQLITE_ROW) { ecu_serial = statement.get_result_col_str(0).value(); + } else if (statement.step() == SQLITE_DONE) { + LOG_DEBUG << "No serial found in database for this ECU, defaulting to empty serial"; } else { - LOG_WARNING << "Could not find primary ecu serial, defaulting to empty serial: " << db.errmsg(); + LOG_ERROR << "Error getting serial for this ECU, defaulting to empty serial: " << db.errmsg(); } } - { - auto statement = - db.prepareStatement("SELECT hardware_id FROM ecu_serials WHERE serial = ?;", ecu_serial); + if (!ecu_serial.empty()) { + auto statement = db.prepareStatement("SELECT hardware_id FROM ecus WHERE serial = ?;", ecu_serial); if (statement.step() == SQLITE_ROW) { ecu_map.insert( {Uptane::EcuSerial(ecu_serial), Uptane::HardwareIdentifier(statement.get_result_col_str(0).value())}); + } else if 
(statement.step() == SQLITE_DONE) { + LOG_DEBUG << "No hardware ID found in database for ECU serial " << ecu_serial; } else { - LOG_WARNING << "Could not find hardware_id for serial " << ecu_serial << ": " << db.errmsg(); + LOG_ERROR << "Error getting hardware ID for ECU serial " << ecu_serial << ": " << db.errmsg(); } } } bool SQLStorage::loadInstallationLog(const std::string& ecu_serial, std::vector* log, - bool only_installed) { + bool only_installed) const { SQLite3Guard db = dbConnection(); std::string ecu_serial_real = ecu_serial; @@ -1062,23 +1210,24 @@ bool SQLStorage::loadInstallationLog(const std::string& ecu_serial, std::vector< // note: sha256 should always be present and is used to uniquely identify // a version. It should normally be part of the hash list as well. - std::vector hashes = Uptane::Hash::decodeVector(hashes_str); + std::vector hashes = Hash::decodeVector(hashes_str); - auto find_sha256 = std::find_if(hashes.cbegin(), hashes.cend(), - [](const Uptane::Hash& h) { return h.type() == Uptane::Hash::Type::kSha256; }); + auto find_sha256 = + std::find_if(hashes.cbegin(), hashes.cend(), [](const Hash& h) { return h.type() == Hash::Type::kSha256; }); if (find_sha256 == hashes.cend()) { LOG_WARNING << "No sha256 in hashes list"; - hashes.emplace_back(Uptane::Hash::Type::kSha256, sha256); + hashes.emplace_back(Hash::Type::kSha256, sha256); } Uptane::Target t(filename, ecu_map, hashes, static_cast(length), correlation_id); if (!custom_str.empty()) { - Json::Reader reader; + std::istringstream css(custom_str); + std::string errs; Json::Value custom; - if (reader.parse(custom_str, custom)) { + if (Json::parseFromStream(Json::CharReaderBuilder(), css, &custom, nullptr)) { t.updateCustom(custom); } else { - LOG_ERROR << "Unable to parse custom data: " << reader.getFormatedErrorMessages(); + LOG_ERROR << "Unable to parse custom data: " << errs; } } new_log.emplace_back(t); @@ -1086,13 +1235,13 @@ bool SQLStorage::loadInstallationLog(const std::string& 
ecu_serial, std::vector< ids_map[id] = k; k++; } catch (const boost::bad_optional_access&) { - LOG_ERROR << "Incompleted installed version, keeping old one"; + LOG_ERROR << "Incomplete installed version list; keeping previous entries."; return false; } } if (statement_state != SQLITE_DONE) { - LOG_ERROR << "Can't get installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to get installed versions: " << db.errmsg(); return false; } @@ -1106,7 +1255,7 @@ bool SQLStorage::loadInstallationLog(const std::string& ecu_serial, std::vector< } bool SQLStorage::loadInstalledVersions(const std::string& ecu_serial, boost::optional* current_version, - boost::optional* pending_version) { + boost::optional* pending_version) const { SQLite3Guard db = dbConnection(); std::string ecu_serial_real = ecu_serial; @@ -1123,22 +1272,23 @@ bool SQLStorage::loadInstalledVersions(const std::string& ecu_serial, boost::opt // note: sha256 should always be present and is used to uniquely identify // a version. It should normally be part of the hash list as well. 
- std::vector hashes = Uptane::Hash::decodeVector(hashes_str); + std::vector hashes = Hash::decodeVector(hashes_str); - auto find_sha256 = std::find_if(hashes.cbegin(), hashes.cend(), - [](const Uptane::Hash& h) { return h.type() == Uptane::Hash::Type::kSha256; }); + auto find_sha256 = + std::find_if(hashes.cbegin(), hashes.cend(), [](const Hash& h) { return h.type() == Hash::Type::kSha256; }); if (find_sha256 == hashes.cend()) { LOG_WARNING << "No sha256 in hashes list"; - hashes.emplace_back(Uptane::Hash::Type::kSha256, sha256); + hashes.emplace_back(Hash::Type::kSha256, sha256); } Uptane::Target t(filename, ecu_map, hashes, static_cast(length), correlation_id); if (!custom_str.empty()) { - Json::Reader reader; + std::istringstream css(custom_str); Json::Value custom; - if (reader.parse(custom_str, custom)) { + std::string errs; + if (Json::parseFromStream(Json::CharReaderBuilder(), css, &custom, &errs)) { t.updateCustom(custom); } else { - LOG_ERROR << "Unable to parse custom data: " << reader.getFormatedErrorMessages(); + LOG_ERROR << "Unable to parse custom data: " << errs; } } @@ -1159,7 +1309,7 @@ bool SQLStorage::loadInstalledVersions(const std::string& ecu_serial, boost::opt return false; } } else { - LOG_TRACE << "Cannot get current installed version: " << db.errmsg(); + LOG_TRACE << "Failed to get current installed version: " << db.errmsg(); *current_version = boost::none; } } @@ -1178,7 +1328,7 @@ bool SQLStorage::loadInstalledVersions(const std::string& ecu_serial, boost::opt return false; } } else { - LOG_TRACE << "Cannot get pending installed version: " << db.errmsg(); + LOG_TRACE << "Failed to get pending installed version: " << db.errmsg(); *pending_version = boost::none; } } @@ -1191,18 +1341,45 @@ bool SQLStorage::hasPendingInstall() { auto statement = db.prepareStatement("SELECT count(*) FROM installed_versions where is_pending = 1"); if (statement.step() != SQLITE_ROW) { - LOG_ERROR << "Can't get tables count: " << db.errmsg(); - throw 
std::runtime_error("Could not count pending installations"); + LOG_ERROR << "Failed to get pending installation count: " << db.errmsg(); + throw SQLException(std::string("Failed to get pending installation count: ") + db.errmsg()); } return statement.get_result_col_int(0) > 0; } +void SQLStorage::getPendingEcus(std::vector>* pendingEcus) { + SQLite3Guard db = dbConnection(); + + auto statement = db.prepareStatement("SELECT ecu_serial, sha256 FROM installed_versions where is_pending = 1"); + int statement_result = statement.step(); + if (statement_result != SQLITE_DONE && statement_result != SQLITE_ROW) { + throw SQLException("Failed to get ECUs with a pending target installation: " + db.errmsg()); + } + + std::vector> ecu_res; + + if (statement_result == SQLITE_DONE) { + // if there are not any records in the DB + return; + } + + for (; statement_result != SQLITE_DONE; statement_result = statement.step()) { + std::string ecu_serial = statement.get_result_col_str(0).value(); + std::string hash = statement.get_result_col_str(1).value(); + ecu_res.emplace_back(std::make_pair(Uptane::EcuSerial(ecu_serial), Hash(Hash::Type::kSha256, hash))); + } + + if (pendingEcus != nullptr) { + *pendingEcus = std::move(ecu_res); + } +} + void SQLStorage::clearInstalledVersions() { SQLite3Guard db = dbConnection(); if (db.exec("DELETE FROM installed_versions;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear installed_versions: " << db.errmsg(); + LOG_ERROR << "Failed to clear installed versions: " << db.errmsg(); return; } } @@ -1216,29 +1393,29 @@ void SQLStorage::saveEcuInstallationResult(const Uptane::EcuSerial& ecu_serial, "(?,?,?,?);", ecu_serial.ToString(), static_cast(result.success), result.result_code.toRepr(), result.description); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set ecu installation result: " << db.errmsg(); + LOG_ERROR << "Failed to set ECU installation result: " << db.errmsg(); return; } } bool 
SQLStorage::loadEcuInstallationResults( - std::vector>* results) { + std::vector>* results) const { SQLite3Guard db = dbConnection(); std::vector> ecu_res; - // keep the same order as in ecu_serials (start with primary) + // keep the same order as in ECUs (start with Primary) auto statement = db.prepareStatement( - "SELECT ecu_serial, success, result_code, description FROM ecu_installation_results INNER JOIN ecu_serials ON " - "ecu_serials.serial = ecu_serial ORDER BY ecu_serials.id;"); + "SELECT ecu_serial, success, result_code, description FROM ecu_installation_results INNER JOIN ecus ON " + "ecus.serial = ecu_serial ORDER BY ecus.id;"); int statement_result = statement.step(); if (statement_result != SQLITE_DONE && statement_result != SQLITE_ROW) { - LOG_ERROR << "Can't get ecu_installation_results: " << db.errmsg(); + LOG_ERROR << "Failed to get ECU installation results: " << db.errmsg(); return false; } if (statement_result == SQLITE_DONE) { - // if there are no any record in the DB + // if there are not any records in the DB return false; } @@ -1272,13 +1449,23 @@ void SQLStorage::storeDeviceInstallationResult(const data::InstallationResult& r "VALUES (0,?,?,?,?,?);", static_cast(result.success), result.result_code.toRepr(), result.description, raw_report, correlation_id); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Can't set device installation result: " << db.errmsg(); + LOG_ERROR << "Failed to store device installation result: " << db.errmsg(); return; } } +bool SQLStorage::storeDeviceInstallationRawReport(const std::string& raw_report) { + SQLite3Guard db = dbConnection(); + auto statement = db.prepareStatement("UPDATE device_installation_result SET raw_report=?;", raw_report); + if (statement.step() != SQLITE_DONE || sqlite3_changes(db.get()) != 1) { + LOG_ERROR << "Failed to store device installation raw report: " << db.errmsg(); + return false; + } + return true; +} + bool SQLStorage::loadDeviceInstallationResult(data::InstallationResult* 
result, std::string* raw_report, - std::string* correlation_id) { + std::string* correlation_id) const { SQLite3Guard db = dbConnection(); data::InstallationResult dev_res; @@ -1289,10 +1476,10 @@ bool SQLStorage::loadDeviceInstallationResult(data::InstallationResult* result, "SELECT success, result_code, description, raw_report, correlation_id FROM device_installation_result;"); int statement_result = statement.step(); if (statement_result == SQLITE_DONE) { - LOG_TRACE << "No device installation result in db"; + LOG_TRACE << "Device installation result not found in database"; return false; } else if (statement_result != SQLITE_ROW) { - LOG_ERROR << "Can't get device_installation_result: " << db.errmsg(); + LOG_ERROR << "Failed to get device installation result: " << db.errmsg(); return false; } @@ -1323,299 +1510,224 @@ bool SQLStorage::loadDeviceInstallationResult(data::InstallationResult* result, return true; } -void SQLStorage::clearInstallationResults() { +void SQLStorage::saveEcuReportCounter(const Uptane::EcuSerial& ecu_serial, const int64_t counter) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } - if (db.exec("DELETE FROM device_installation_result;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear device_installation_result: " << db.errmsg(); - return; - } - - if (db.exec("DELETE FROM ecu_installation_results;", nullptr, nullptr) != SQLITE_OK) { - LOG_ERROR << "Can't clear ecu_installation_results: " << db.errmsg(); + auto statement = db.prepareStatement( + "INSERT OR REPLACE INTO ecu_report_counter (ecu_serial, counter) VALUES " + "(?,?);", + ecu_serial.ToString(), counter); + if (statement.step() != SQLITE_DONE) { + LOG_ERROR << "Failed to set ECU report counter: " << db.errmsg(); return; } - - db.commitTransaction(); } -boost::optional> SQLStorage::checkTargetFile(const Uptane::Target& target) const { +bool 
SQLStorage::loadEcuReportCounter(std::vector>* results) const { SQLite3Guard db = dbConnection(); - auto statement = db.prepareStatement( - "SELECT real_size, sha256, sha512, filename FROM target_images WHERE targetname = ?;", target.filename()); + std::vector> ecu_cnt; - int statement_state; - while ((statement_state = statement.step()) == SQLITE_ROW) { - auto sha256 = statement.get_result_col_str(1); - auto sha512 = statement.get_result_col_str(2); - if ((*sha256).empty() && (*sha512).empty()) { - // Old aktualizr didn't save checksums, this could require to redownload old images. - LOG_WARNING << "Image without checksum: " << target.filename(); - continue; - } - bool sha256_match = false; - bool sha512_match = false; - if (!(*sha256).empty()) { - if (target.MatchHash(Uptane::Hash(Uptane::Hash::Type::kSha256, *sha256))) { - sha256_match = true; - } - } + // keep the same order as in ECUs (start with Primary) + auto statement = db.prepareStatement( + "SELECT ecu_serial, counter FROM ecu_report_counter INNER JOIN ecus ON " + "ecus.serial = ecu_serial ORDER BY ecus.id;"); + int statement_result = statement.step(); + if (statement_result != SQLITE_DONE && statement_result != SQLITE_ROW) { + LOG_ERROR << "Failed to get ECU report counter: " << db.errmsg(); + return false; + } - if (!(*sha512).empty()) { - if (target.MatchHash(Uptane::Hash(Uptane::Hash::Type::kSha512, *sha512))) { - sha512_match = true; - } - } - if (((*sha256).empty() || sha256_match) && ((*sha512).empty() || sha512_match)) { - if (boost::filesystem::exists(images_path_ / *statement.get_result_col_str(3))) { - return {{static_cast(statement.get_result_col_int(0)), *statement.get_result_col_str(3)}}; - } else { - return boost::none; - } + if (statement_result == SQLITE_DONE) { + // if there are not any records in the DB + return false; + } + + for (; statement_result != SQLITE_DONE; statement_result = statement.step()) { + try { + std::string ecu_serial = statement.get_result_col_str(0).value(); + 
int64_t counter = statement.get_result_col_int(1); + + ecu_cnt.emplace_back(Uptane::EcuSerial(ecu_serial), counter); + } catch (const boost::bad_optional_access&) { + return false; } } - if (statement_state == SQLITE_DONE) { - LOG_INFO << "No file '" + target.filename() << "' with matched hash in the database"; - return boost::none; + if (results != nullptr) { + *results = std::move(ecu_cnt); } - assert(statement_state != SQLITE_ROW); // from the previous loop precondition - LOG_ERROR << "Statement step failure: " << db.errmsg(); - return boost::none; + return true; } -class SQLTargetWHandle : public StorageTargetWHandle { - public: - SQLTargetWHandle(const SQLStorage& storage, Uptane::Target target) - : db_(storage.dbPath()), target_(std::move(target)) { - StorageTargetWHandle::WriteError exc("could not save file " + target_.filename() + " to the filesystem"); - - std::string sha256Hash; - std::string sha512Hash; - for (const auto& hash : target_.hashes()) { - if (hash.type() == Uptane::Hash::Type::kSha256) { - sha256Hash = hash.HashString(); - } else if (hash.type() == Uptane::Hash::Type::kSha512) { - sha512Hash = hash.HashString(); - } - } - std::string filename = (storage.images_path_ / target_.hashes()[0].HashString()).string(); - auto statement = db_.prepareStatement( - "INSERT OR REPLACE INTO target_images (targetname, sha256, sha512, filename) VALUES ( ?, ?, ?, ?);", - target_.filename(), sha256Hash, sha512Hash, target_.hashes()[0].HashString()); - - if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Statement step failure: " << db_.errmsg(); - throw exc; - } - boost::filesystem::create_directories(storage.images_path_); - stream_.open(filename); - if (!stream_.good()) { - LOG_ERROR << "Could not open image for write: " << storage.images_path_ / target_.filename(); - throw exc; - } +void SQLStorage::saveReportEvent(const Json::Value& json_value) { + std::string json_string = Utils::jsonToCanonicalStr(json_value); + SQLite3Guard db = dbConnection(); + 
auto statement = db.prepareStatement( + "INSERT INTO report_events SELECT MAX(id) + 1, ? FROM report_events", json_string); + if (statement.step() != SQLITE_DONE) { + LOG_ERROR << "Failed to save report event: " << db.errmsg(); + return; } +} - ~SQLTargetWHandle() override { +bool SQLStorage::loadReportEvents(Json::Value* report_array, int64_t* id_max, int limit) const { + SQLite3Guard db = dbConnection(); + auto statement = db.prepareStatement("SELECT id, json_string FROM report_events LIMIT ?;", limit); + int statement_result = statement.step(); + if (statement_result != SQLITE_DONE && statement_result != SQLITE_ROW) { + LOG_ERROR << "Failed to get report events: " << db.errmsg(); + return false; + } + if (statement_result == SQLITE_DONE) { + // if there are not any records in the DB + return false; + } + *id_max = 0; + for (; statement_result != SQLITE_DONE; statement_result = statement.step()) { try { - SQLTargetWHandle::wcommit(); - } catch (std::exception& ex) { - LOG_ERROR << "Failed to commit to database: " << ex.what(); - } catch (...) { - LOG_ERROR << "Failed to commit to database: unknown error"; + int64_t id = statement.get_result_col_int(0); + std::string json_string = statement.get_result_col_str(1).value(); + std::istringstream jss(json_string); + Json::Value event_json; + std::string errs; + if (Json::parseFromStream(Json::CharReaderBuilder(), jss, &event_json, &errs)) { + report_array->append(event_json); + *id_max = (*id_max) > id ? 
(*id_max) : id; + } else { + LOG_ERROR << "Unable to parse event data: " << errs; + } + } catch (const boost::bad_optional_access&) { + return false; } } - size_t wfeed(const uint8_t* buf, size_t size) override { - stream_.write(reinterpret_cast(buf), static_cast(size)); - written_size_ += size; + return true; +} - return size; +void SQLStorage::deleteReportEvents(int64_t id_max) { + SQLite3Guard db = dbConnection(); + + auto statement = db.prepareStatement("DELETE FROM report_events WHERE id <= ?;", id_max); + if (statement.step() != SQLITE_DONE) { + LOG_ERROR << "Failed to clear report events: " << db.errmsg(); } +} - void wcommit() override { - if (stream_) { - stream_.close(); - auto statement = - db_.prepareStatement("UPDATE target_images SET real_size = ? WHERE targetname = ?;", - static_cast(written_size_), target_.filename()); +void SQLStorage::clearInstallationResults() { + SQLite3Guard db = dbConnection(); - int err = statement.step(); - if (err != SQLITE_DONE) { - LOG_ERROR << "Could not save size in db: " << db_.errmsg(); - throw StorageTargetWHandle::WriteError("could not update size of " + target_.filename() + " in sql storage"); - } - } + db.beginTransaction(); + + if (db.exec("DELETE FROM device_installation_result;", nullptr, nullptr) != SQLITE_OK) { + LOG_ERROR << "Failed to clear device installation result: " << db.errmsg(); + return; } - void wabort() noexcept override { - if (stream_) { - stream_.close(); - } - if (sqlite3_changes(db_.get()) > 0) { - auto statement = - db_.prepareStatement("DELETE FROM target_images WHERE targetname=?;", target_.filename()); - if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "could not delete " << target_.filename() << " from sql storage"; - } - } + if (db.exec("DELETE FROM ecu_installation_results;", nullptr, nullptr) != SQLITE_OK) { + LOG_ERROR << "Failed to clear ECU installation results: " << db.errmsg(); + return; } - friend class SQLTargetRHandle; + db.commitTransaction(); +} - private: - 
SQLTargetWHandle(const boost::filesystem::path& db_path, Uptane::Target target, - const boost::filesystem::path& image_path, const size_t& start_from = 0) - : db_(db_path), target_(std::move(target)) { - if (db_.get_rc() != SQLITE_OK) { - LOG_ERROR << "Can't open database: " << db_.errmsg(); - throw StorageTargetWHandle::WriteError("could not open sql storage"); - } - stream_.open(image_path.string(), std::ofstream::out | std::ofstream::app); - if (!stream_.good()) { - LOG_ERROR << "Could not open image for write: " << image_path; - throw StorageTargetWHandle::WriteError("could not open file for write: " + image_path.string()); - } +void SQLStorage::storeDeviceDataHash(const std::string& data_type, const std::string& hash) { + SQLite3Guard db = dbConnection(); - written_size_ = start_from; + auto statement = db.prepareStatement( + "INSERT OR REPLACE INTO device_data(data_type,hash) VALUES (?,?);", data_type, hash); + if (statement.step() != SQLITE_DONE) { + LOG_ERROR << "Failed to store " << data_type << " hash: " << db.errmsg(); + throw SQLException("Failed to store " + data_type + " hash: " + db.errmsg()); } - SQLite3Guard db_; - Uptane::Target target_; - std::ofstream stream_; -}; - -std::unique_ptr SQLStorage::allocateTargetFile(bool from_director, const Uptane::Target& target) { - (void)from_director; - return std::unique_ptr(new SQLTargetWHandle(*this, target)); } -class SQLTargetRHandle : public StorageTargetRHandle { - public: - SQLTargetRHandle(const SQLStorage& storage, Uptane::Target target) - : db_path_(storage.dbPath()), db_(db_path_), target_(std::move(target)), size_(0) { - StorageTargetRHandle::ReadError exc("could not read file " + target_.filename() + " from sql storage"); +bool SQLStorage::loadDeviceDataHash(const std::string& data_type, std::string* hash) const { + SQLite3Guard db = dbConnection(); - auto exists = storage.checkTargetFile(target_); - if (!exists) { - throw exc; - } + auto statement = + db.prepareStatement("SELECT hash FROM 
device_data WHERE data_type = ? LIMIT 1;", data_type); - size_ = exists->first; - partial_ = size_ < target_.length(); - image_path_ = storage.images_path_ / exists->second; - stream_.open(image_path_.string()); - if (!stream_.good()) { - LOG_ERROR << "Could not open image: " << storage.images_path_ / target_.filename(); - throw exc; - } + int result = statement.step(); + if (result == SQLITE_DONE) { + LOG_TRACE << data_type << " hash not found in database"; + return false; + } else if (result != SQLITE_ROW) { + LOG_ERROR << "Failed to get " << data_type << " hash: " << db.errmsg(); + return false; } - ~SQLTargetRHandle() override { SQLTargetRHandle::rclose(); } + if (hash != nullptr) { + *hash = statement.get_result_col_str(0).value(); + } - size_t rsize() const override { return size_; } + return true; +} - size_t rread(uint8_t* buf, size_t size) override { - stream_.read(reinterpret_cast(buf), static_cast(size)); - return static_cast(stream_.gcount()); - } +void SQLStorage::clearDeviceData() { + SQLite3Guard db = dbConnection(); - void rclose() noexcept override { - if (stream_.is_open()) { - stream_.close(); - } + if (db.exec("DELETE FROM device_data;", nullptr, nullptr) != SQLITE_OK) { + LOG_ERROR << "Failed to clear device data: " << db.errmsg(); + return; } +} - bool isPartial() const noexcept override { return partial_; } - std::unique_ptr toWriteHandle() override { - return std::unique_ptr(new SQLTargetWHandle(db_path_, target_, image_path_, size_)); +void SQLStorage::storeTargetFilename(const std::string& targetname, const std::string& filename) const { + SQLite3Guard db = dbConnection(); + auto statement = db.prepareStatement( + "INSERT OR REPLACE INTO target_images (targetname, filename) VALUES (?, ?);", targetname, filename); + + if (statement.step() != SQLITE_DONE) { + LOG_ERROR << "Failed to store Target filename: " << db.errmsg(); + throw SQLException(std::string("Failed to store Target filename: ") + db.errmsg()); } +} - private: - 
boost::filesystem::path db_path_; - SQLite3Guard db_; - Uptane::Target target_; - size_t size_; - bool partial_{false}; - boost::filesystem::path image_path_; - std::ifstream stream_; -}; +std::string SQLStorage::getTargetFilename(const std::string& targetname) const { + SQLite3Guard db = dbConnection(); -std::unique_ptr SQLStorage::openTargetFile(const Uptane::Target& target) { - return std_::make_unique(*this, target); + auto statement = + db.prepareStatement("SELECT filename FROM target_images WHERE targetname = ?;", targetname); + + switch (statement.step()) { + case SQLITE_ROW: + return statement.get_result_col_str(0).value(); + case SQLITE_DONE: + return {}; + default: + throw SQLException(db.errmsg().insert(0, "Failed to read Target filename from database: ")); + } } -std::vector SQLStorage::getTargetFiles() { +std::vector SQLStorage::getAllTargetNames() const { SQLite3Guard db = dbConnection(); - auto statement = db.prepareStatement<>("SELECT targetname, real_size, sha256, sha512 FROM target_images;"); + auto statement = db.prepareStatement<>("SELECT targetname FROM target_images;"); - std::vector v; + std::vector names; int result = statement.step(); while (result != SQLITE_DONE) { if (result != SQLITE_ROW) { - LOG_ERROR << "Statement step failure: " << db.errmsg(); - throw std::runtime_error("Error getting target files"); - } - - auto tname = statement.get_result_col_str(0).value(); - auto tsize = statement.get_result_col_int(1); - auto sha256 = statement.get_result_col_str(2).value(); - auto sha512 = statement.get_result_col_str(3).value(); - - std::vector hashes; - if (!sha256.empty()) { - hashes.emplace_back(Uptane::Hash::Type::kSha256, sha256); - } - if (!sha512.empty()) { - hashes.emplace_back(Uptane::Hash::Type::kSha512, sha512); + LOG_ERROR << "Failed to get Target filenames: " << db.errmsg(); + throw SQLException(std::string("Failed to get Target filenames: ") + db.errmsg()); } - v.emplace_back(tname, Uptane::EcuMap{}, hashes, static_cast(tsize)); 
- + names.push_back(statement.get_result_col_str(0).value()); result = statement.step(); } - - return v; + return names; } -void SQLStorage::removeTargetFile(const std::string& target_name) { +void SQLStorage::deleteTargetInfo(const std::string& targetname) const { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); - return; - } - - auto statement = - db.prepareStatement("SELECT filename FROM target_images WHERE targetname = ?;", target_name); - - if (statement.step() != SQLITE_ROW) { - LOG_ERROR << "Statement step failure: " << db.errmsg(); - throw std::runtime_error("Could not find target file"); - } - - std::string filename = statement.get_result_col_str(0).value(); - - statement = db.prepareStatement("DELETE FROM target_images WHERE targetname=?;", target_name); + auto statement = db.prepareStatement("DELETE FROM target_images WHERE targetname=?;", targetname); if (statement.step() != SQLITE_DONE) { - LOG_ERROR << "Statement step failure: " << db.errmsg(); - throw std::runtime_error("Could not remove target file"); + LOG_ERROR << "Failed to clear Target filenames: " << db.errmsg(); + throw SQLException(std::string("Failed to clear Target filenames: ") + db.errmsg()); } - try { - boost::filesystem::remove(images_path_ / filename); - } catch (std::exception& e) { - LOG_ERROR << "Could not remove target file"; - throw; - } - - db.commitTransaction(); } - -void SQLStorage::cleanUp() { boost::filesystem::remove_all(dbPath()); } diff --git a/src/libaktualizr/storage/sqlstorage.h b/src/libaktualizr/storage/sqlstorage.h index 2b7bfe9082..21fe33208d 100644 --- a/src/libaktualizr/storage/sqlstorage.h +++ b/src/libaktualizr/storage/sqlstorage.h @@ -1,7 +1,6 @@ #ifndef SQLSTORAGE_H_ #define SQLSTORAGE_H_ -#include #include #include @@ -19,80 +18,101 @@ class SQLStorage : public SQLStorageBase, public INvStorage { public: friend class SQLTargetWHandle; friend class SQLTargetRHandle; - explicit 
SQLStorage(const StorageConfig& config, bool readonly); + explicit SQLStorage(const StorageConfig& config, bool readonly, + StorageClient storage_client = StorageClient::kUptane); ~SQLStorage() override = default; + SQLStorage(const SQLStorage&) = delete; + SQLStorage(SQLStorage&&) = delete; + SQLStorage& operator=(const SQLStorage&) = delete; + SQLStorage& operator=(SQLStorage&&) = delete; void storePrimaryKeys(const std::string& public_key, const std::string& private_key) override; - bool loadPrimaryKeys(std::string* public_key, std::string* private_key) override; - bool loadPrimaryPublic(std::string* public_key) override; - bool loadPrimaryPrivate(std::string* private_key) override; + bool loadPrimaryKeys(std::string* public_key, std::string* private_key) const override; + bool loadPrimaryPublic(std::string* public_key) const override; + bool loadPrimaryPrivate(std::string* private_key) const override; void clearPrimaryKeys() override; + void saveSecondaryInfo(const Uptane::EcuSerial& ecu_serial, const std::string& sec_type, + const PublicKey& public_key) override; + void saveSecondaryData(const Uptane::EcuSerial& ecu_serial, const std::string& data) override; + bool loadSecondaryInfo(const Uptane::EcuSerial& ecu_serial, SecondaryInfo* secondary) const override; + bool loadSecondariesInfo(std::vector* secondaries) const override; + void storeTlsCreds(const std::string& ca, const std::string& cert, const std::string& pkey) override; void storeTlsCa(const std::string& ca) override; void storeTlsCert(const std::string& cert) override; void storeTlsPkey(const std::string& pkey) override; - bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) override; + bool loadTlsCreds(std::string* ca, std::string* cert, std::string* pkey) const override; void clearTlsCreds() override; - bool loadTlsCa(std::string* ca) override; - bool loadTlsCert(std::string* cert) override; - bool loadTlsPkey(std::string* pkey) override; + bool loadTlsCa(std::string* ca) 
const override; + bool loadTlsCert(std::string* cert) const override; + bool loadTlsPkey(std::string* pkey) const override; void storeRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Version version) override; - bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) override; + bool loadRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Version version) const override; void storeNonRoot(const std::string& data, Uptane::RepositoryType repo, Uptane::Role role) override; - bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Role role) override; + bool loadNonRoot(std::string* data, Uptane::RepositoryType repo, Uptane::Role role) const override; void clearNonRootMeta(Uptane::RepositoryType repo) override; void clearMetadata() override; void storeDelegation(const std::string& data, Uptane::Role role) override; - bool loadDelegation(std::string* data, Uptane::Role role) override; + bool loadDelegation(std::string* data, Uptane::Role role) const override; bool loadAllDelegations(std::vector>& data) const override; void deleteDelegation(Uptane::Role role) override; void clearDelegations() override; void storeDeviceId(const std::string& device_id) override; - bool loadDeviceId(std::string* device_id) override; + bool loadDeviceId(std::string* device_id) const override; void clearDeviceId() override; void storeEcuSerials(const EcuSerials& serials) override; - bool loadEcuSerials(EcuSerials* serials) override; + bool loadEcuSerials(EcuSerials* serials) const override; void clearEcuSerials() override; - void storeMisconfiguredEcus(const std::vector& ecus) override; - bool loadMisconfiguredEcus(std::vector* ecus) override; + void storeCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, const std::string& manifest) override; + bool loadCachedEcuManifest(const Uptane::EcuSerial& ecu_serial, std::string* manifest) const override; + void saveMisconfiguredEcu(const MisconfiguredEcu& ecu) override; + 
bool loadMisconfiguredEcus(std::vector* ecus) const override; void clearMisconfiguredEcus() override; void storeEcuRegistered() override; - bool loadEcuRegistered() override; + bool loadEcuRegistered() const override; void clearEcuRegistered() override; void storeNeedReboot() override; - bool loadNeedReboot(bool* need_reboot) override; + bool loadNeedReboot(bool* need_reboot) const override; void clearNeedReboot() override; void saveInstalledVersion(const std::string& ecu_serial, const Uptane::Target& target, InstalledVersionUpdateMode update_mode) override; bool loadInstalledVersions(const std::string& ecu_serial, boost::optional* current_version, - boost::optional* pending_version) override; + boost::optional* pending_version) const override; bool loadInstallationLog(const std::string& ecu_serial, std::vector* log, - bool only_installed) override; + bool only_installed) const override; bool hasPendingInstall() override; + void getPendingEcus(std::vector>* pendingEcus) override; void clearInstalledVersions() override; void saveEcuInstallationResult(const Uptane::EcuSerial& ecu_serial, const data::InstallationResult& result) override; bool loadEcuInstallationResults( - std::vector>* results) override; + std::vector>* results) const override; void storeDeviceInstallationResult(const data::InstallationResult& result, const std::string& raw_report, const std::string& correlation_id) override; + bool storeDeviceInstallationRawReport(const std::string& raw_report) override; bool loadDeviceInstallationResult(data::InstallationResult* result, std::string* raw_report, - std::string* correlation_id) override; + std::string* correlation_id) const override; + void saveEcuReportCounter(const Uptane::EcuSerial& ecu_serial, int64_t counter) override; + bool loadEcuReportCounter(std::vector>* results) const override; + void saveReportEvent(const Json::Value& json_value) override; + bool loadReportEvents(Json::Value* report_array, int64_t* id_max, int limit = -1) const override; + 
void deleteReportEvents(int64_t id_max) override; void clearInstallationResults() override; - std::unique_ptr allocateTargetFile(bool from_director, const Uptane::Target& target) override; - std::unique_ptr openTargetFile(const Uptane::Target& target) override; - boost::optional> checkTargetFile(const Uptane::Target& target) const override; - std::vector getTargetFiles() override; - void removeTargetFile(const std::string& target_name) override; - void cleanUp() override; + void storeDeviceDataHash(const std::string& data_type, const std::string& hash) override; + bool loadDeviceDataHash(const std::string& data_type, std::string* hash) const override; + void clearDeviceData() override; + + void storeTargetFilename(const std::string& targetname, const std::string& filename) const override; + std::string getTargetFilename(const std::string& targetname) const override; + std::vector getAllTargetNames() const override; + void deleteTargetInfo(const std::string& targetname) const override; + StorageType type() override { return StorageType::kSqlite; }; private: - boost::filesystem::path images_path_{sqldb_path_.parent_path() / "images"}; - void cleanMetaVersion(Uptane::RepositoryType repo, const Uptane::Role& role); }; diff --git a/src/libaktualizr/storage/sqlstorage_base.cc b/src/libaktualizr/storage/sqlstorage_base.cc index 0f209d9531..63d86741b0 100644 --- a/src/libaktualizr/storage/sqlstorage_base.cc +++ b/src/libaktualizr/storage/sqlstorage_base.cc @@ -2,6 +2,10 @@ #include "storage_exception.h" #include +#include +#include + +#include "utilities/utils.h" boost::filesystem::path SQLStorageBase::dbPath() const { return sqldb_path_; } @@ -32,6 +36,7 @@ SQLStorageBase::SQLStorageBase(boost::filesystem::path sqldb_path, bool readonly int current_schema_version) : sqldb_path_(std::move(sqldb_path)), readonly_(readonly), + mutex_(new std::mutex()), schema_migrations_(std::move(schema_migrations)), schema_rollback_migrations_(std::move(schema_rollback_migrations)), 
current_schema_(std::move(current_schema)), @@ -45,12 +50,14 @@ SQLStorageBase::SQLStorageBase(boost::filesystem::path sqldb_path, bool readonly throw StorageException(std::string("Could not check storage directory permissions: ") + std::strerror(errno)); } if ((st.st_mode & (S_IWGRP | S_IWOTH)) != 0) { - throw StorageException("Storage directory has unsafe permissions"); + throw StorageException( + "Storage directory has unsafe permissions (it should not be readable or writeable by group nor others)"); } if ((st.st_mode & (S_IRGRP | S_IROTH)) != 0) { // Remove read permissions for group and others if (chmod(db_parent_path.c_str(), S_IRWXU) < 0) { - throw StorageException("Storage directory has unsafe permissions"); + throw StorageException( + "Storage directory has unsafe permissions (it should not be readable or writeable by group nor others)"); } } } @@ -72,9 +79,9 @@ SQLStorageBase::SQLStorageBase(boost::filesystem::path sqldb_path, bool readonly } SQLite3Guard SQLStorageBase::dbConnection() const { - SQLite3Guard db(dbPath(), readonly_); + SQLite3Guard db(dbPath(), readonly_, mutex_); if (db.get_rc() != SQLITE_OK) { - throw SQLException(std::string("Can't open database: ") + db.errmsg()); + throw SQLInternalException(std::string("Can't open database: ") + db.errmsg()); } return db; } @@ -134,8 +141,9 @@ bool SQLStorageBase::dbMigrateForward(int version_from, int version_to) { SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); + try { + db.beginTransaction(); + } catch (const SQLException& e) { return false; } @@ -211,8 +219,9 @@ bool SQLStorageBase::dbMigrate() { LOG_INFO << "Bootstraping DB to version " << current_schema_version_; SQLite3Guard db = dbConnection(); - if (!db.beginTransaction()) { - LOG_ERROR << "Can't start transaction: " << db.errmsg(); + try { + db.beginTransaction(); + } catch (const SQLException& e) { return false; } diff --git 
a/src/libaktualizr/storage/sqlstorage_base.h b/src/libaktualizr/storage/sqlstorage_base.h index d656f59580..4b1ca90445 100644 --- a/src/libaktualizr/storage/sqlstorage_base.h +++ b/src/libaktualizr/storage/sqlstorage_base.h @@ -1,24 +1,24 @@ #ifndef SQLSTORAGE_BASE_H_ #define SQLSTORAGE_BASE_H_ -#include +#include #include #include +#include "libaktualizr/config.h" #include "sql_utils.h" -#include "storage_config.h" enum class DbVersion : int32_t { kEmpty = -1, kInvalid = -2 }; class StorageLock { public: StorageLock() = default; - StorageLock(boost::filesystem::path path); - StorageLock(StorageLock &other) = delete; - StorageLock &operator=(StorageLock &other) = delete; + explicit StorageLock(boost::filesystem::path path); + virtual ~StorageLock(); + StorageLock(const StorageLock &other) = delete; StorageLock(StorageLock &&other) = default; + StorageLock &operator=(const StorageLock &other) = delete; StorageLock &operator=(StorageLock &&other) = default; - virtual ~StorageLock(); class locked_exception : std::runtime_error { public: @@ -35,7 +35,6 @@ class SQLStorageBase { explicit SQLStorageBase(boost::filesystem::path sqldb_path, bool readonly, std::vector schema_migrations, std::vector schema_rollback_migrations, std::string current_schema, int current_schema_version); - ~SQLStorageBase() = default; std::string getTableSchemaFromDb(const std::string &tablename); bool dbMigrateForward(int version_from, int version_to = 0); bool dbMigrateBackward(int version_from, int version_to = 0); @@ -48,6 +47,7 @@ class SQLStorageBase { bool readonly_{false}; StorageLock lock; + std::shared_ptr mutex_; const std::vector schema_migrations_; std::vector schema_rollback_migrations_; diff --git a/src/libaktualizr/storage/sqlstorage_test.cc b/src/libaktualizr/storage/sqlstorage_test.cc index 57b0a294ac..f4ce1f3c74 100644 --- a/src/libaktualizr/storage/sqlstorage_test.cc +++ b/src/libaktualizr/storage/sqlstorage_test.cc @@ -1,12 +1,13 @@ -#include - #include +#include +#include + 
#include "logging/logging.h" #include "storage/sql_utils.h" #include "storage/sqlstorage.h" #include "uptane/directorrepository.h" -#include "uptane/imagesrepository.h" +#include "uptane/imagerepository.h" #include "utilities/utils.h" boost::filesystem::path test_data_dir; @@ -168,31 +169,33 @@ TEST(sqlstorage, migrate_back) { config.path = temp_dir.Path(); SQLStorage storage(config, false); - - SQLite3Guard db(temp_dir / "sql.db"); auto ver = storage.getVersion(); - std::string migration_script = - "\ - BEGIN TRANSACTION;\ - CREATE TABLE test_table(test_text TEXT NOT NULL, test_int INT NOT NULL);\ - INSERT INTO test_table VALUES(\"test_text\", 123);\ - DELETE FROM version;\ - INSERT INTO version VALUES( " + - std::to_string(static_cast(ver) + 1) + - " );\ - COMMIT TRANSACTION;"; - db.exec(migration_script, NULL, NULL); - - std::string back_migration_script = - "\ - DROP TABLE test_table; \ - DELETE FROM version;\ - INSERT INTO version VALUES(" + - std::to_string(static_cast(ver)) + ");"; - - auto statement = db.prepareStatement("insert into rollback_migrations VALUES (?,?);", static_cast(ver) + 1, - back_migration_script); - statement.step(); + + { + SQLite3Guard db(temp_dir / "sql.db"); + std::string migration_script = + "\ + BEGIN TRANSACTION;\ + CREATE TABLE test_table(test_text TEXT NOT NULL, test_int INT NOT NULL);\ + INSERT INTO test_table VALUES(\"test_text\", 123);\ + DELETE FROM version;\ + INSERT INTO version VALUES( " + + std::to_string(static_cast(ver) + 1) + + " );\ + COMMIT TRANSACTION;"; + db.exec(migration_script, NULL, NULL); + + std::string back_migration_script = + "\ + DROP TABLE test_table; \ + DELETE FROM version;\ + INSERT INTO version VALUES(" + + std::to_string(static_cast(ver)) + ");"; + + auto statement = db.prepareStatement("insert into rollback_migrations VALUES (?,?);", static_cast(ver) + 1, + back_migration_script); + statement.step(); + } EXPECT_EQ(static_cast(storage.getVersion()), static_cast(ver) + 1); 
EXPECT_TRUE(storage.dbMigrate()); @@ -446,21 +449,21 @@ TEST(sqlstorage, migrate_root_works) { std::string raw_director_root; storage.loadRoot(&raw_director_root, Uptane::RepositoryType::Director(), Uptane::Version()); Uptane::DirectorRepository director; - EXPECT_TRUE(director.initRoot(raw_director_root)); + EXPECT_NO_THROW(director.initRoot(Uptane::RepositoryType(Uptane::RepositoryType::DIRECTOR), raw_director_root)); std::string raw_director_targets; storage.loadNonRoot(&raw_director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); - EXPECT_TRUE(director.verifyTargets(raw_director_targets)); + EXPECT_NO_THROW(director.verifyTargets(raw_director_targets)); - // Images - std::string raw_images_root; - storage.loadRoot(&raw_images_root, Uptane::RepositoryType::Image(), Uptane::Version()); - Uptane::ImagesRepository imagesrepository; - EXPECT_TRUE(imagesrepository.initRoot(raw_images_root)); + // Image repo + std::string raw_image_root; + storage.loadRoot(&raw_image_root, Uptane::RepositoryType::Image(), Uptane::Version()); + Uptane::ImageRepository imagerepository; + EXPECT_NO_THROW(imagerepository.initRoot(Uptane::RepositoryType(Uptane::RepositoryType::IMAGE), raw_image_root)); // Check that the roots are different and haven't been swapped - EXPECT_NE(raw_director_root, raw_images_root); + EXPECT_NE(raw_director_root, raw_image_root); Json::Value director_json = Utils::parseJSON(raw_director_root); Json::Value sign = director_json["signed"]; EXPECT_EQ(sign["_type"], "Root"); @@ -513,6 +516,33 @@ TEST(sqlstorage, migrate_from_fs) { } } +TEST(sqlstorage, store_and_load_report_events) { + TemporaryDirectory temp_dir; + StorageConfig config; + config.path = temp_dir.Path(); + auto storage = INvStorage::newStorage(config); + + const int event_numb{10}; + for (int ii = 0; ii < event_numb; ++ii) { + storage->saveReportEvent(Utils::parseJSON(R"("id": "some ID", "eventType": "some Event")")); + } + int64_t max_id; + { + Json::Value 
events{Json::arrayValue}; + storage->loadReportEvents(&events, &max_id); + EXPECT_EQ(events.size(), event_numb); + } + const std::vector event_number_limits{1, 4, 3, 5}; + int processed_events{0}; + for (const auto& l : event_number_limits) { + Json::Value events{Json::arrayValue}; + storage->loadReportEvents(&events, &max_id, l); + EXPECT_EQ(events.size(), l < (event_numb - processed_events) ? l : event_numb - processed_events); + storage->deleteReportEvents(max_id); + processed_events += l; + } +} + #ifndef __NO_MAIN__ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/src/libaktualizr/storage/storage_common_test.cc b/src/libaktualizr/storage/storage_common_test.cc index 74a12f934a..a73020ea7e 100644 --- a/src/libaktualizr/storage/storage_common_test.cc +++ b/src/libaktualizr/storage/storage_common_test.cc @@ -5,26 +5,22 @@ #include -#include "logging/logging.h" +#include "crypto/crypto.h" +#include "libaktualizr/types.h" +#include "repo.h" #include "storage/sqlstorage.h" -#include "utilities/types.h" #include "utilities/utils.h" -StorageType current_storage_type{StorageType::kSqlite}; +namespace fs = boost::filesystem; -std::unique_ptr Storage(const boost::filesystem::path &dir) { +std::unique_ptr Storage(const fs::path &dir) { StorageConfig storage_config; - storage_config.type = current_storage_type; + storage_config.type = StorageType::kSqlite; storage_config.path = dir; - - if (storage_config.type == StorageType::kSqlite) { - return std::unique_ptr(new SQLStorage(storage_config, false)); - } else { - throw std::runtime_error("Invalid config type"); - } + return std::unique_ptr(new SQLStorage(storage_config, false)); } -StorageConfig MakeConfig(StorageType type, const boost::filesystem::path &storage_dir) { +StorageConfig MakeConfig(StorageType type, const fs::path &storage_dir) { StorageConfig config; config.type = type; @@ -36,8 +32,8 @@ StorageConfig MakeConfig(StorageType type, const boost::filesystem::path &storag 
return config; } -/* Load and store primary keys. */ -TEST(storage, load_store_primary_keys) { +/* Load and store Primary keys. */ +TEST(StorageCommon, LoadStorePrimaryKeys) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -55,7 +51,7 @@ TEST(storage, load_store_primary_keys) { } /* Load and store TLS credentials. */ -TEST(storage, load_store_tls) { +TEST(StorageCommon, LoadStoreTls) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -75,7 +71,7 @@ TEST(storage, load_store_tls) { } /* Load and store Uptane metadata. */ -TEST(storage, load_store_metadata) { +TEST(StorageCommon, LoadStoreMetadata) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -100,7 +96,7 @@ TEST(storage, load_store_metadata) { Json::Value meta_root; meta_root["signed"] = root_json; std::string director_root = Utils::jsonToStr(meta_root); - std::string images_root = Utils::jsonToStr(meta_root); + std::string image_root = Utils::jsonToStr(meta_root); Json::Value targets_json; targets_json["_type"] = "Targets"; @@ -117,12 +113,12 @@ TEST(storage, load_store_metadata) { Json::Value meta_targets; meta_targets["signed"] = targets_json; std::string director_targets = Utils::jsonToStr(meta_targets); - std::string images_targets = Utils::jsonToStr(meta_targets); + std::string image_targets = Utils::jsonToStr(meta_targets); Json::Value timestamp_json; timestamp_json["signed"]["_type"] = "Timestamp"; timestamp_json["signed"]["expires"] = "2038-01-19T03:14:06Z"; - std::string images_timestamp = Utils::jsonToStr(timestamp_json); + std::string image_timestamp = Utils::jsonToStr(timestamp_json); Json::Value snapshot_json; snapshot_json["_type"] = "Snapshot"; @@ -134,47 +130,47 @@ TEST(storage, load_store_metadata) { Json::Value meta_snapshot; meta_snapshot["signed"] = snapshot_json; - std::string images_snapshot = Utils::jsonToStr(meta_snapshot); + std::string image_snapshot = 
Utils::jsonToStr(meta_snapshot); storage->storeRoot(director_root, Uptane::RepositoryType::Director(), Uptane::Version(1)); storage->storeNonRoot(director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); - storage->storeRoot(images_root, Uptane::RepositoryType::Image(), Uptane::Version(1)); - storage->storeNonRoot(images_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); - storage->storeNonRoot(images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); - storage->storeNonRoot(images_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); + storage->storeRoot(image_root, Uptane::RepositoryType::Image(), Uptane::Version(1)); + storage->storeNonRoot(image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); + storage->storeNonRoot(image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); + storage->storeNonRoot(image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); std::string loaded_director_root; std::string loaded_director_targets; - std::string loaded_images_root; - std::string loaded_images_targets; - std::string loaded_images_timestamp; - std::string loaded_images_snapshot; + std::string loaded_image_root; + std::string loaded_image_targets; + std::string loaded_image_timestamp; + std::string loaded_image_snapshot; EXPECT_TRUE(storage->loadLatestRoot(&loaded_director_root, Uptane::RepositoryType::Director())); EXPECT_TRUE( storage->loadNonRoot(&loaded_director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); - EXPECT_TRUE(storage->loadLatestRoot(&loaded_images_root, Uptane::RepositoryType::Image())); - EXPECT_TRUE(storage->loadNonRoot(&loaded_images_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); + EXPECT_TRUE(storage->loadLatestRoot(&loaded_image_root, Uptane::RepositoryType::Image())); + EXPECT_TRUE(storage->loadNonRoot(&loaded_image_targets, Uptane::RepositoryType::Image(), 
Uptane::Role::Targets())); EXPECT_TRUE( - storage->loadNonRoot(&loaded_images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); - EXPECT_TRUE(storage->loadNonRoot(&loaded_images_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); + storage->loadNonRoot(&loaded_image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); + EXPECT_TRUE(storage->loadNonRoot(&loaded_image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); EXPECT_EQ(director_root, loaded_director_root); EXPECT_EQ(director_targets, loaded_director_targets); - EXPECT_EQ(images_root, loaded_images_root); - EXPECT_EQ(images_targets, loaded_images_targets); - EXPECT_EQ(images_timestamp, loaded_images_timestamp); - EXPECT_EQ(images_snapshot, loaded_images_snapshot); + EXPECT_EQ(image_root, loaded_image_root); + EXPECT_EQ(image_targets, loaded_image_targets); + EXPECT_EQ(image_timestamp, loaded_image_timestamp); + EXPECT_EQ(image_snapshot, loaded_image_snapshot); storage->clearNonRootMeta(Uptane::RepositoryType::Director()); storage->clearNonRootMeta(Uptane::RepositoryType::Image()); EXPECT_FALSE( storage->loadNonRoot(&loaded_director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); EXPECT_FALSE( - storage->loadNonRoot(&loaded_images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); + storage->loadNonRoot(&loaded_image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); } /* Load and store Uptane roots. */ -TEST(storage, load_store_root) { +TEST(StorageCommon, LoadStoreRoot) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -210,7 +206,7 @@ TEST(storage, load_store_root) { } /* Load and store the device ID. 
*/ -TEST(storage, load_store_deviceid) { +TEST(StorageCommon, LoadStoreDeviceId) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -229,7 +225,7 @@ TEST(storage, load_store_deviceid) { /* Load and store ECU serials. * Preserve ECU ordering between store and load calls. */ -TEST(storage, load_store_ecu_serials) { +TEST(StorageCommon, LoadStoreEcuSerials) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -249,24 +245,21 @@ TEST(storage, load_store_ecu_serials) { } /* Load and store a list of misconfigured ECUs. */ -TEST(storage, load_store_misconfigured_ecus) { +TEST(StorageCommon, LoadStoreMisconfiguredEcus) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); - std::vector ecus; - ecus.push_back(MisconfiguredEcu(Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw"), - EcuState::kNotRegistered)); - - storage->storeMisconfiguredEcus(ecus); + storage->saveMisconfiguredEcu( + {Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw"), EcuState::kOld}); std::vector ecus_out; EXPECT_TRUE(storage->loadMisconfiguredEcus(&ecus_out)); - EXPECT_EQ(ecus_out.size(), ecus.size()); + EXPECT_EQ(ecus_out.size(), 1); EXPECT_EQ(ecus_out[0].serial, Uptane::EcuSerial("primary")); EXPECT_EQ(ecus_out[0].hardware_id, Uptane::HardwareIdentifier("primary_hw")); - EXPECT_EQ(ecus_out[0].state, EcuState::kNotRegistered); + EXPECT_EQ(ecus_out[0].state, EcuState::kOld); storage->clearMisconfiguredEcus(); ecus_out.clear(); @@ -274,7 +267,7 @@ TEST(storage, load_store_misconfigured_ecus) { } /* Load and store a flag indicating successful registration. */ -TEST(storage, load_store_ecu_registered) { +TEST(StorageCommon, LoadStoreEcuRegistered) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -290,14 +283,14 @@ TEST(storage, load_store_ecu_registered) { } /* Load and store installed versions. 
*/ -TEST(storage, load_store_installed_versions) { +TEST(StorageCommon, LoadStoreInstalledVersions) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); - // Test lazy primary installed version: primary ecu serial is not defined yet - const std::vector hashes = { - Uptane::Hash{Uptane::Hash::Type::kSha256, "2561"}, - Uptane::Hash{Uptane::Hash::Type::kSha512, "5121"}, + // Test lazy Primary installed version: Primary ECU serial is not defined yet + const std::vector hashes = { + Hash{Hash::Type::kSha256, "2561"}, + Hash{Hash::Type::kSha512, "5121"}, }; Uptane::EcuMap primary_ecu{{Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}}; Uptane::Target t1{"update.bin", primary_ecu, hashes, 1, "corrid"}; @@ -321,6 +314,7 @@ TEST(storage, load_store_installed_versions) { { boost::optional current; EXPECT_TRUE(storage->loadInstalledVersions("primary", ¤t, nullptr)); + EXPECT_FALSE(storage->hasPendingInstall()); EXPECT_TRUE(!!current); EXPECT_EQ(current->filename(), "update.bin"); EXPECT_EQ(current->sha256Hash(), "2561"); @@ -333,24 +327,26 @@ TEST(storage, load_store_installed_versions) { } // Set t2 as a pending version - Uptane::Target t2{"update2.bin", primary_ecu, {Uptane::Hash{Uptane::Hash::Type::kSha256, "2562"}}, 2}; + Uptane::Target t2{"update2.bin", primary_ecu, {Hash{Hash::Type::kSha256, "2562"}}, 2}; storage->savePrimaryInstalledVersion(t2, InstalledVersionUpdateMode::kPending); { boost::optional pending; EXPECT_TRUE(storage->loadInstalledVersions("primary", nullptr, &pending)); EXPECT_TRUE(!!pending); + EXPECT_TRUE(storage->hasPendingInstall()); EXPECT_EQ(pending->filename(), "update2.bin"); } // Set t3 as the new pending - Uptane::Target t3{"update3.bin", primary_ecu, {Uptane::Hash{Uptane::Hash::Type::kSha256, "2563"}}, 3}; + Uptane::Target t3{"update3.bin", primary_ecu, {Hash{Hash::Type::kSha256, "2563"}}, 3}; storage->savePrimaryInstalledVersion(t3, InstalledVersionUpdateMode::kPending); { boost::optional 
pending; EXPECT_TRUE(storage->loadInstalledVersions("primary", nullptr, &pending)); EXPECT_TRUE(!!pending); + EXPECT_TRUE(storage->hasPendingInstall()); EXPECT_EQ(pending->filename(), "update3.bin"); } @@ -364,6 +360,7 @@ TEST(storage, load_store_installed_versions) { EXPECT_TRUE(!!current); EXPECT_EQ(current->filename(), "update3.bin"); EXPECT_FALSE(!!pending); + EXPECT_FALSE(storage->hasPendingInstall()); std::vector log; storage->loadInstallationLog("primary", &log, true); @@ -378,6 +375,7 @@ TEST(storage, load_store_installed_versions) { storage->loadInstallationLog("primary", &log, true); EXPECT_EQ(log.size(), 3); EXPECT_EQ(log.back().filename(), "update.bin"); + EXPECT_FALSE(storage->hasPendingInstall()); } // Set t2 as the new pending and t3 as current afterwards: the pending flag @@ -392,6 +390,7 @@ TEST(storage, load_store_installed_versions) { EXPECT_TRUE(!!current); EXPECT_EQ(current->filename(), "update3.bin"); EXPECT_FALSE(!!pending); + EXPECT_FALSE(storage->hasPendingInstall()); std::vector log; storage->loadInstallationLog("primary", &log, true); @@ -400,9 +399,9 @@ TEST(storage, load_store_installed_versions) { EXPECT_EQ(log[0].custom_data()["foo"], "bar"); } - // Add a secondary installed version + // Add a Secondary installed version Uptane::EcuMap secondary_ecu{{Uptane::EcuSerial("secondary1"), Uptane::HardwareIdentifier("secondary_hw")}}; - Uptane::Target tsec{"secondary.bin", secondary_ecu, {Uptane::Hash{Uptane::Hash::Type::kSha256, "256s"}}, 4}; + Uptane::Target tsec{"secondary.bin", secondary_ecu, {Hash{Hash::Type::kSha256, "256s"}}, 4}; storage->saveInstalledVersion("secondary_1", tsec, InstalledVersionUpdateMode::kCurrent); { @@ -417,10 +416,10 @@ TEST(storage, load_store_installed_versions) { } /* - * Load and store an ecu installation result in an SQL database. + * Load and store an ECU installation result in an SQL database. * Load and store a device installation result in an SQL database. 
*/ -TEST(storage, load_store_installation_results) { +TEST(StorageCommon, LoadStoreInstallationResults) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); @@ -452,240 +451,108 @@ TEST(storage, load_store_installation_results) { EXPECT_EQ(dev_res.result_code.num_code, data::ResultCode::Numeric::kGeneralError); EXPECT_EQ(report, "raw"); EXPECT_EQ(correlation_id, "corrid"); + EXPECT_TRUE(storage->storeDeviceInstallationRawReport("user's raw report")); storage->clearInstallationResults(); res.clear(); EXPECT_FALSE(storage->loadEcuInstallationResults(&res)); EXPECT_EQ(res.size(), 0); EXPECT_FALSE(storage->loadDeviceInstallationResult(&dev_res, &report, &correlation_id)); + EXPECT_FALSE(storage->storeDeviceInstallationRawReport( + "This call will return a negative value since the installation report was cleaned!")); } -/* Load and store targets. */ -TEST(storage, store_target) { - TemporaryDirectory temp_dir; - std::unique_ptr storage = Storage(temp_dir.Path()); - - Json::Value target_json; - target_json["hashes"]["sha256"] = "hash"; - target_json["length"] = 2; - Uptane::Target target("some.deb", target_json); - - // write - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - const uint8_t wb[] = "ab"; - fhandle->wfeed(wb, 1); - fhandle->wfeed(wb + 1, 1); - fhandle->wcommit(); - } - - // read - { - std::unique_ptr rhandle = storage->openTargetFile(target); - uint8_t rb[3] = {0}; - EXPECT_EQ(rhandle->rsize(), 2); - rhandle->rread(rb, 1); - rhandle->rread(rb + 1, 1); - rhandle->rclose(); - EXPECT_STREQ(reinterpret_cast(rb), "ab"); - } - - // write again - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - const uint8_t wb[] = "ab"; - fhandle->wfeed(wb, 1); - fhandle->wfeed(wb + 1, 1); - fhandle->wcommit(); - } - - // delete - { - storage->removeTargetFile(target.filename()); - EXPECT_THROW(storage->openTargetFile(target), StorageTargetRHandle::ReadError); - 
EXPECT_THROW(storage->removeTargetFile(target.filename()), std::runtime_error); - } - - // write stream - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - std::stringstream("ab") >> *fhandle; - } - - // read stream - { - std::stringstream sstr; - std::unique_ptr rhandle = storage->openTargetFile(target); - sstr << *rhandle; - EXPECT_STREQ(sstr.str().c_str(), "ab"); - } -} - -/* - * List targets currently in storage. - * Remove a target binary from storage. - */ -TEST(storage, list_remove_targets) { +TEST(StorageCommon, DownloadedFilesInfo) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); - Json::Value target_json; - target_json["hashes"]["sha256"] = "HASH"; - target_json["length"] = 2; - Uptane::Target target("some.deb", target_json); - - auto tfs = storage->getTargetFiles(); - EXPECT_EQ(tfs.size(), 0); - - // write - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, target); - const uint8_t wb[] = "ab"; - fhandle->wfeed(wb, 1); - fhandle->wfeed(wb + 1, 1); - fhandle->wcommit(); - } - - tfs = storage->getTargetFiles(); - ASSERT_EQ(tfs.size(), 1); - - auto tf = tfs.at(0); + storage->storeTargetFilename("target1", "file1"); + storage->storeTargetFilename("target2", "file2"); + ASSERT_EQ(storage->getTargetFilename("target1"), "file1"); + ASSERT_EQ(storage->getTargetFilename("target2"), "file2"); - EXPECT_EQ(tf.filename(), "some.deb"); - EXPECT_EQ(tf.length(), 2); - EXPECT_EQ(tf.hashes().size(), 1); - EXPECT_EQ(tf.hashes().at(0), Uptane::Hash(Uptane::Hash::Type::kSha256, "HASH")); + auto names = storage->getAllTargetNames(); + ASSERT_EQ(names.size(), 2); + ASSERT_EQ(names.at(0), "target1"); + ASSERT_EQ(names.at(1), "target2"); - // note: implementation specific - EXPECT_TRUE(boost::filesystem::exists(temp_dir.Path() / "images" / "HASH")); - - storage->removeTargetFile(tf.filename()); - - tfs = storage->getTargetFiles(); - EXPECT_EQ(tfs.size(), 0); - 
EXPECT_FALSE(boost::filesystem::exists(temp_dir.Path() / "images" / "HASH")); + storage->deleteTargetInfo("target1"); + names = storage->getAllTargetNames(); + ASSERT_EQ(names.size(), 1); + ASSERT_EQ(names.at(0), "target2"); } -TEST(storage, checksum) { +TEST(StorageCommon, LoadStoreSecondaryInfo) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); - Json::Value target_json1; - target_json1["hashes"]["sha256"] = "hash1"; - target_json1["length"] = 2; - Uptane::Target target1("some.deb", target_json1); - Json::Value target_json2; - target_json2["length"] = 2; - target_json2["hashes"]["sha256"] = "hash2"; - Uptane::Target target2("some.deb", target_json2); - - // write target1 - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, target1); - const uint8_t wb[] = "ab"; - fhandle->wfeed(wb, 2); - fhandle->wcommit(); - } - - // read target 1 - { - std::unique_ptr rhandle = storage->openTargetFile(target1); - uint8_t rb[3] = {0}; - EXPECT_EQ(rhandle->rsize(), 2); - rhandle->rread(rb, 2); - rhandle->rclose(); - EXPECT_STREQ(reinterpret_cast(rb), "ab"); - } - - // read target 2 - { EXPECT_THROW(storage->openTargetFile(target2), StorageTargetRHandle::ReadError); } -} + // note: this can be done before the ECU is known + storage->saveSecondaryData(Uptane::EcuSerial("secondary_2"), "data2"); -TEST(storage, partial) { - TemporaryDirectory temp_dir; - std::unique_ptr storage = Storage(temp_dir.Path()); - - Json::Value target_json; - target_json["hashes"]["sha256"] = "hash1"; - target_json["length"] = 3; - Uptane::Target target("some.deb", target_json); + EcuSerials serials{{Uptane::EcuSerial("primary"), Uptane::HardwareIdentifier("primary_hw")}, + {Uptane::EcuSerial("secondary_1"), Uptane::HardwareIdentifier("secondary_hw")}, + {Uptane::EcuSerial("secondary_2"), Uptane::HardwareIdentifier("secondary_hw")}}; + storage->storeEcuSerials(serials); - // write partial target - { - std::unique_ptr fhandle = storage->allocateTargetFile(false, 
target); - const uint8_t wb[] = "a"; - fhandle->wfeed(wb, 1); - fhandle->wcommit(); - } + storage->saveSecondaryInfo(Uptane::EcuSerial("secondary_1"), "ip", PublicKey("key1", KeyType::kED25519)); - // read and check partial target - { - std::unique_ptr rhandle = storage->openTargetFile(target); - uint8_t rb[2] = {0}; - EXPECT_EQ(rhandle->rsize(), 1); - EXPECT_TRUE(rhandle->isPartial()); - rhandle->rread(rb, 1); - rhandle->rclose(); - EXPECT_STREQ(reinterpret_cast(rb), "a"); - } + EXPECT_THROW(storage->saveSecondaryInfo(Uptane::EcuSerial("primary"), "ip", PublicKey("key0", KeyType::kRSA2048)), + std::logic_error); - // Append without committing, should commit in whandle destructor - { - std::unique_ptr whandle = storage->openTargetFile(target)->toWriteHandle(); - const uint8_t wb[] = "b"; - whandle->wfeed(wb, 1); - } + std::vector sec_infos; + EXPECT_TRUE(storage->loadSecondariesInfo(&sec_infos)); - // read and check partial target - { - std::unique_ptr rhandle = storage->openTargetFile(target); - uint8_t rb[3] = {0}; - EXPECT_EQ(rhandle->rsize(), 2); - EXPECT_TRUE(rhandle->isPartial()); - rhandle->rread(rb, 2); - rhandle->rclose(); - EXPECT_STREQ(reinterpret_cast(rb), "ab"); - } + ASSERT_EQ(sec_infos.size(), 2); + EXPECT_EQ(sec_infos[0].serial.ToString(), "secondary_1"); + EXPECT_EQ(sec_infos[0].hw_id.ToString(), "secondary_hw"); + EXPECT_EQ(sec_infos[0].type, "ip"); + EXPECT_EQ(sec_infos[0].pub_key.Value(), "key1"); + EXPECT_EQ(sec_infos[0].pub_key.Type(), KeyType::kED25519); + EXPECT_EQ(sec_infos[1].pub_key.Type(), KeyType::kUnknown); + EXPECT_EQ(sec_infos[1].type, ""); + EXPECT_EQ(sec_infos[1].extra, "data2"); - // Append partial - { - std::unique_ptr whandle = storage->openTargetFile(target)->toWriteHandle(); - const uint8_t wb[] = "c"; - whandle->wfeed(wb, 1); - whandle->wcommit(); - } + // test update of data + storage->saveSecondaryInfo(Uptane::EcuSerial("secondary_1"), "ip", PublicKey("key2", KeyType::kED25519)); + 
storage->saveSecondaryData(Uptane::EcuSerial("secondary_1"), "data1"); + EXPECT_TRUE(storage->loadSecondariesInfo(&sec_infos)); - // Check full target - { - std::unique_ptr rhandle = storage->openTargetFile(target); - EXPECT_EQ(rhandle->rsize(), 3); - EXPECT_FALSE(rhandle->isPartial()); - } + ASSERT_EQ(sec_infos.size(), 2); + EXPECT_EQ(sec_infos[0].pub_key.Value(), "key2"); + EXPECT_EQ(sec_infos[0].extra, "data1"); } -/* Import keys and credentials from file into storage. */ -TEST(storage, import_data) { +/* Import keys and credentials from file into storage. + * Re-import updated credentials from file into storage. + * Reject new certificate with a different device ID. */ +TEST(StorageImport, ImportData) { TemporaryDirectory temp_dir; std::unique_ptr storage = Storage(temp_dir.Path()); - boost::filesystem::create_directories(temp_dir / "import"); + fs::create_directories(temp_dir / "import"); ImportConfig import_config; import_config.base_path = temp_dir.Path() / "import"; - import_config.uptane_private_key_path = BasedPath("private"); - import_config.uptane_public_key_path = BasedPath("public"); - import_config.tls_cacert_path = BasedPath("ca"); - import_config.tls_clientcert_path = BasedPath("cert"); - import_config.tls_pkey_path = BasedPath("pkey"); + import_config.uptane_private_key_path = utils::BasedPath("private"); + import_config.uptane_public_key_path = utils::BasedPath("public"); + import_config.tls_cacert_path = utils::BasedPath("ca"); + import_config.tls_clientcert_path = utils::BasedPath("cert"); + import_config.tls_pkey_path = utils::BasedPath("pkey"); + + std::string tls_cert_in1; + std::string tls_pkey_in1; + const std::string device_id1 = "test_id1"; + StructGuard certificate1 = Crypto::generateCert(1024, 365, "", "", "", device_id1, true); + Crypto::serializeCert(&tls_pkey_in1, &tls_cert_in1, certificate1.get()); Utils::writeFile(import_config.uptane_private_key_path.get(import_config.base_path).string(), std::string("uptane_private_1")); 
Utils::writeFile(import_config.uptane_public_key_path.get(import_config.base_path).string(), std::string("uptane_public_1")); Utils::writeFile(import_config.tls_cacert_path.get(import_config.base_path).string(), std::string("tls_cacert_1")); - Utils::writeFile(import_config.tls_clientcert_path.get(import_config.base_path).string(), std::string("tls_cert_1")); - Utils::writeFile(import_config.tls_pkey_path.get(import_config.base_path).string(), std::string("tls_pkey_1")); + Utils::writeFile(import_config.tls_clientcert_path.get(import_config.base_path).string(), tls_cert_in1); + Utils::writeFile(import_config.tls_pkey_path.get(import_config.base_path).string(), tls_pkey_in1); - // Initially the storage is empty + // Initially the storage is empty. EXPECT_FALSE(storage->loadPrimaryPublic(nullptr)); EXPECT_FALSE(storage->loadPrimaryPrivate(nullptr)); EXPECT_FALSE(storage->loadTlsCa(nullptr)); @@ -693,6 +560,8 @@ TEST(storage, import_data) { EXPECT_FALSE(storage->loadTlsPkey(nullptr)); storage->importData(import_config); + // Set the device ID to simulate initialization with the given certificate. + storage->storeDeviceId(device_id1); std::string primary_public; std::string primary_private; @@ -700,7 +569,7 @@ TEST(storage, import_data) { std::string tls_cert; std::string tls_pkey; - // the data has been imported + // Verify that the data has been imported. EXPECT_TRUE(storage->loadPrimaryPublic(&primary_public)); EXPECT_TRUE(storage->loadPrimaryPrivate(&primary_private)); EXPECT_TRUE(storage->loadTlsCa(&tls_ca)); @@ -710,16 +579,54 @@ TEST(storage, import_data) { EXPECT_EQ(primary_private, "uptane_private_1"); EXPECT_EQ(primary_public, "uptane_public_1"); EXPECT_EQ(tls_ca, "tls_cacert_1"); - EXPECT_EQ(tls_cert, "tls_cert_1"); - EXPECT_EQ(tls_pkey, "tls_pkey_1"); + EXPECT_EQ(tls_cert, tls_cert_in1); + EXPECT_EQ(tls_pkey, tls_pkey_in1); + + // Create second TLS cert/key (with a different device ID) and other dummy + // files. 
+ std::string tls_cert_in2; + std::string tls_pkey_in2; + const std::string device_id2 = "test_id2"; + StructGuard certificate2 = Crypto::generateCert(1024, 365, "", "", "", device_id2, true); + Crypto::serializeCert(&tls_pkey_in2, &tls_cert_in2, certificate2.get()); + EXPECT_NE(tls_cert_in1, tls_cert_in2); + EXPECT_NE(tls_pkey_in1, tls_pkey_in2); Utils::writeFile(import_config.uptane_private_key_path.get(import_config.base_path).string(), std::string("uptane_private_2")); Utils::writeFile(import_config.uptane_public_key_path.get(import_config.base_path).string(), std::string("uptane_public_2")); Utils::writeFile(import_config.tls_cacert_path.get(import_config.base_path).string(), std::string("tls_cacert_2")); - Utils::writeFile(import_config.tls_clientcert_path.get(import_config.base_path).string(), std::string("tls_cert_2")); - Utils::writeFile(import_config.tls_pkey_path.get(import_config.base_path).string(), std::string("tls_pkey_2")); + Utils::writeFile(import_config.tls_clientcert_path.get(import_config.base_path).string(), tls_cert_in2); + Utils::writeFile(import_config.tls_pkey_path.get(import_config.base_path).string(), tls_pkey_in2); + + // Attempt to re-import with a TLS cert whose device ID changed. Re-import is allowed but the old device ID is kept in the storage. + EXPECT_NO_THROW(storage->importData(import_config)); + + EXPECT_TRUE(storage->loadPrimaryPublic(&primary_public)); + EXPECT_TRUE(storage->loadPrimaryPrivate(&primary_private)); + EXPECT_TRUE(storage->loadTlsCa(&tls_ca)); + EXPECT_TRUE(storage->loadTlsCert(&tls_cert)); + EXPECT_TRUE(storage->loadTlsPkey(&tls_pkey)); + + // Allow import but do not update primary keys. + EXPECT_EQ(primary_private, "uptane_private_1"); + EXPECT_EQ(primary_public, "uptane_public_1"); + EXPECT_EQ(tls_ca, "tls_cacert_2"); + EXPECT_EQ(tls_cert, tls_cert_in2); + EXPECT_EQ(tls_pkey, tls_pkey_in2); + + // Create third TLS cert/key (with the same device ID as the first) and other + // dummy files.
+ std::string tls_cert_in3; + std::string tls_pkey_in3; + StructGuard certificate3 = Crypto::generateCert(1024, 365, "", "", "", device_id1, true); + Crypto::serializeCert(&tls_pkey_in3, &tls_cert_in3, certificate3.get()); + EXPECT_NE(tls_cert_in1, tls_cert_in3); + EXPECT_NE(tls_pkey_in1, tls_pkey_in3); + + Utils::writeFile(import_config.tls_clientcert_path.get(import_config.base_path).string(), tls_cert_in3); + Utils::writeFile(import_config.tls_pkey_path.get(import_config.base_path).string(), tls_pkey_in3); storage->importData(import_config); @@ -729,12 +636,49 @@ TEST(storage, import_data) { EXPECT_TRUE(storage->loadTlsCert(&tls_cert)); EXPECT_TRUE(storage->loadTlsPkey(&tls_pkey)); - // only root cert is being updated + // All TLS objects should be updated except primary keys. EXPECT_EQ(primary_private, "uptane_private_1"); EXPECT_EQ(primary_public, "uptane_public_1"); EXPECT_EQ(tls_ca, "tls_cacert_2"); - EXPECT_EQ(tls_cert, "tls_cert_1"); - EXPECT_EQ(tls_pkey, "tls_pkey_1"); + EXPECT_EQ(tls_cert, tls_cert_in3); + EXPECT_EQ(tls_pkey, tls_pkey_in3); +} + +TEST(StorageImport, ImportInitialRoot) { + TemporaryDirectory temp_dir; + std::unique_ptr storage = Storage(temp_dir.Path()); + fs::create_directories(temp_dir / "import"); + + ImportConfig import_config; + import_config.base_path = temp_dir.Path() / "import"; + + // Generate a set of valid Uptane root keys + auto repo_path = temp_dir.Path() / "repo"; + Repo image_repo{Uptane::RepositoryType::Image(), repo_path, "", ""}; + image_repo.generateRepo(); + Repo director_repo{Uptane::RepositoryType::Director(), repo_path, "", ""}; + director_repo.generateRepo(); + director_repo.rotate(Uptane::Role::Root()); + + EXPECT_FALSE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Image())); + EXPECT_FALSE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())); + + fs::create_directories(import_config.base_path / "repo"); + fs::create_directories(import_config.base_path / "director"); + 
fs::copy(repo_path / "repo/repo/root.json", import_config.base_path / "repo/root.json"); + Utils::writeFile(import_config.base_path / "director/root.json", std::string("invalid")); + + storage->importData(import_config); + EXPECT_TRUE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Image())); + EXPECT_FALSE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())) + << "Director root.json was invalid. It shouldn't have been imported"; + + // Copy the real director root.json over + fs::copy_file(repo_path / "repo/director/root.json", import_config.base_path / "director/root.json", + fs::copy_option::overwrite_if_exists); + storage->importData(import_config); + EXPECT_TRUE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())); } #ifndef __NO_MAIN__ @@ -742,11 +686,6 @@ int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); logger_init(); logger_set_threshold(boost::log::trivial::trace); - - std::cout << "Running tests for SQLStorage" << std::endl; - current_storage_type = StorageType::kSqlite; - int res_sql = RUN_ALL_TESTS(); - - return res_sql; // 0 indicates success + return RUN_ALL_TESTS(); } #endif diff --git a/src/libaktualizr/storage/storage_config.cc b/src/libaktualizr/storage/storage_config.cc index 8aeed2b039..dca74ac70b 100644 --- a/src/libaktualizr/storage/storage_config.cc +++ b/src/libaktualizr/storage/storage_config.cc @@ -1,21 +1,5 @@ -#include "storage_config.h" - -std::ostream& operator<<(std::ostream& os, const StorageType stype) { - std::string stype_str; - switch (stype) { - case StorageType::kFileSystem: - stype_str = "filesystem"; - break; - case StorageType::kSqlite: - stype_str = "sqlite"; - break; - default: - stype_str = "unknown"; - break; - } - os << '"' << stype_str << '"'; - return os; -} +#include "libaktualizr/config.h" +#include "utilities/config_utils.h" void StorageConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { CopyFromConfig(type, "type", pt); diff 
--git a/src/libaktualizr/storage/storage_config.h b/src/libaktualizr/storage/storage_config.h deleted file mode 100644 index c5045684ad..0000000000 --- a/src/libaktualizr/storage/storage_config.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef STORAGE_CONFIG_H -#define STORAGE_CONFIG_H - -#include -#include - -#include -#include - -#include "utilities/config_utils.h" - -enum class StorageType { kFileSystem = 0, kSqlite }; -std::ostream& operator<<(std::ostream& os, StorageType stype); - -struct StorageConfig { - StorageType type{StorageType::kSqlite}; - boost::filesystem::path path{"/var/sota"}; - - // FS storage - BasedPath uptane_metadata_path{"metadata"}; - BasedPath uptane_private_key_path{"ecukey.der"}; - BasedPath uptane_public_key_path{"ecukey.pub"}; - BasedPath tls_cacert_path{"root.crt"}; - BasedPath tls_pkey_path{"pkey.pem"}; - BasedPath tls_clientcert_path{"client.pem"}; - - // SQLite storage - BasedPath sqldb_path{"sql.db"}; // based on `/var/sota` - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -struct ImportConfig { - boost::filesystem::path base_path{"/var/sota/import"}; - BasedPath uptane_private_key_path{""}; - BasedPath uptane_public_key_path{""}; - BasedPath tls_cacert_path{""}; - BasedPath tls_pkey_path{""}; - BasedPath tls_clientcert_path{""}; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -template <> -inline void CopyFromConfig(StorageType& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { - boost::optional value = pt.get_optional(option_name); - if (value.is_initialized()) { - std::string storage_type{StripQuotesFromStrings(value.get())}; - if (storage_type == "sqlite") { - dest = StorageType::kSqlite; - } else { - dest = StorageType::kFileSystem; - } - } -} - -#endif // STORAGE_CONFIG_H diff --git a/src/libaktualizr/storage/storage_exception.h 
b/src/libaktualizr/storage/storage_exception.h index eb9ab4e1cb..71a64c3616 100644 --- a/src/libaktualizr/storage/storage_exception.h +++ b/src/libaktualizr/storage/storage_exception.h @@ -1,10 +1,12 @@ #ifndef STORAGE_EXCEPTION_H_ #define STORAGE_EXCEPTION_H_ +#include +#include + class StorageException : public std::runtime_error { public: - StorageException(const std::string& what) : std::runtime_error(what) {} - ~StorageException() noexcept override = default; + explicit StorageException(const std::string& what) : std::runtime_error(what) {} }; #endif // STORAGE_EXCEPTION_H_ diff --git a/src/libaktualizr/telemetry/CMakeLists.txt b/src/libaktualizr/telemetry/CMakeLists.txt index 0d628efeec..32f3a9b6b6 100644 --- a/src/libaktualizr/telemetry/CMakeLists.txt +++ b/src/libaktualizr/telemetry/CMakeLists.txt @@ -1,6 +1,4 @@ set(SOURCES telemetryconfig.cc) -set(HEADERS telemetryconfig.h) - target_sources(config PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/telemetryconfig.cc) -aktualizr_source_file_checks(${SOURCES} ${HEADERS}) +aktualizr_source_file_checks(${SOURCES}) diff --git a/src/libaktualizr/telemetry/telemetryconfig.cc b/src/libaktualizr/telemetry/telemetryconfig.cc index e0f476ace3..01de250d1d 100644 --- a/src/libaktualizr/telemetry/telemetryconfig.cc +++ b/src/libaktualizr/telemetry/telemetryconfig.cc @@ -1,11 +1,13 @@ -#include "telemetry/telemetryconfig.h" +#include "libaktualizr/config.h" #include "utilities/config_utils.h" void TelemetryConfig::updateFromPropertyTree(const boost::property_tree::ptree& pt) { CopyFromConfig(report_network, "report_network", pt); + CopyFromConfig(report_config, "report_config", pt); } void TelemetryConfig::writeToStream(std::ostream& out_stream) const { writeOption(out_stream, report_network, "report_network"); + writeOption(out_stream, report_config, "report_config"); } diff --git a/src/libaktualizr/telemetry/telemetryconfig.h b/src/libaktualizr/telemetry/telemetryconfig.h deleted file mode 100644 index e433e6f3bf..0000000000 --- 
a/src/libaktualizr/telemetry/telemetryconfig.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef TELEMETRY_TELEMETRY_CONFIG_H_ -#define TELEMETRY_TELEMETRY_CONFIG_H_ - -#include - -struct TelemetryConfig { - /** - * Report device network information: IP address, hostname, MAC address - */ - bool report_network{true}; - - void updateFromPropertyTree(const boost::property_tree::ptree& pt); - void writeToStream(std::ostream& out_stream) const; -}; - -#endif // TELEMETRY_TELEMETRY_CONFIG_H_ \ No newline at end of file diff --git a/src/libaktualizr/uptane/CMakeLists.txt b/src/libaktualizr/uptane/CMakeLists.txt index d34ff93703..e550f164fd 100644 --- a/src/libaktualizr/uptane/CMakeLists.txt +++ b/src/libaktualizr/uptane/CMakeLists.txt @@ -1,34 +1,31 @@ set(SOURCES + directorrepository.cc fetcher.cc + imagerepository.cc iterator.cc + manifest.cc metawithkeys.cc role.cc root.cc + secondary_metadata.cc tuf.cc - uptanerepository.cc - directorrepository.cc - imagesrepository.cc) + uptanerepository.cc) set(HEADERS + directorrepository.h exceptions.h fetcher.h + imagerepository.h iterator.h - secondaryinterface.h + manifest.h + secondary_metadata.h tuf.h - uptanerepository.h - directorrepository.h - imagesrepository.h) + uptanerepository.h) add_library(uptane OBJECT ${SOURCES}) -if (BUILD_ISOTP) - target_sources(uptane PRIVATE isotpsecondary.cc) - target_include_directories(uptane PRIVATE ${PROJECT_SOURCE_DIR}/third_party/isotp-c/src) -endif (BUILD_ISOTP) - add_aktualizr_test(NAME tuf SOURCES tuf_test.cc PROJECT_WORKING_DIRECTORY) -add_aktualizr_test(NAME tuf_hash SOURCES tuf_hash_test.cc PROJECT_WORKING_DIRECTORY) if(BUILD_OSTREE AND SOTA_PACKED_CREDENTIALS) add_aktualizr_test(NAME uptane_ci SOURCES uptane_ci_test.cc PROJECT_WORKING_DIRECTORY @@ -40,9 +37,14 @@ else(BUILD_OSTREE AND SOTA_PACKED_CREDENTIALS) endif(BUILD_OSTREE AND SOTA_PACKED_CREDENTIALS) -add_aktualizr_test(NAME uptane SOURCES uptane_test.cc PROJECT_WORKING_DIRECTORY LIBRARIES uptane_generator_lib) 
+add_aktualizr_test(NAME uptane + SOURCES uptane_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib provisioner_test_utils virtual_secondary) +set_property(SOURCE uptane_test.cc + PROPERTY COMPILE_DEFINITIONS + TEST_PKCS11_MODULE_PATH="${TEST_PKCS11_MODULE_PATH}") set_tests_properties(test_uptane PROPERTIES LABELS "crypto") -target_link_libraries(t_uptane virtual_secondary) add_aktualizr_test(NAME uptane_delegation SOURCES uptane_delegation_test.cc PROJECT_WORKING_DIRECTORY ARGS "$" LIBRARIES uptane_generator_lib) @@ -50,17 +52,31 @@ add_dependencies(t_uptane_delegation uptane-generator) target_link_libraries(t_uptane_delegation virtual_secondary) set_tests_properties(test_uptane_delegation PROPERTIES LABELS "crypto") -add_aktualizr_test(NAME uptane_network SOURCES uptane_network_test.cc PROJECT_WORKING_DIRECTORY) +add_aktualizr_test(NAME uptane_network + SOURCES uptane_network_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib virtual_secondary) set_tests_properties(test_uptane_network PROPERTIES LABELS "crypto") -target_link_libraries(t_uptane_network virtual_secondary) -add_aktualizr_test(NAME uptane_serial SOURCES uptane_serial_test.cc ARGS ${PROJECT_BINARY_DIR} - PROJECT_WORKING_DIRECTORY LIBRARIES uptane_generator_lib) -target_link_libraries(t_uptane_serial virtual_secondary) +add_aktualizr_test(NAME uptane_serial SOURCES uptane_serial_test.cc + PROJECT_WORKING_DIRECTORY + LIBRARIES uptane_generator_lib virtual_secondary) -add_aktualizr_test(NAME uptane_init SOURCES uptane_init_test.cc PROJECT_WORKING_DIRECTORY LIBRARIES PUBLIC uptane_generator_lib) +if(BUILD_OSTREE) + # Test that SotaUptaneClient::AssembleManifest() works correctly under OSTree. + # This requires the OSTree sysroot created by make_ostree_sysroot in + # package_manager/CMakeLists.txt, which is run as a dependency of the + # build_tests target. 
+ add_aktualizr_test(NAME uptane_ostree SOURCES uptane_ostree_test.cc + PROJECT_WORKING_DIRECTORY + ARGS ${PROJECT_BINARY_DIR}/ostree_repo + LIBRARIES uptane_generator_lib virtual_secondary) +else(BUILD_OSTREE) + list(APPEND TEST_SOURCES uptane_ostree_test.cc) +endif(BUILD_OSTREE) -add_aktualizr_test(NAME director SOURCES director_test.cc PROJECT_WORKING_DIRECTORY +add_aktualizr_test(NAME director SOURCES director_test.cc + PROJECT_WORKING_DIRECTORY ARGS "$") -aktualizr_source_file_checks(${SOURCES} ${HEADERS} isotpsecondary.cc isotpsecondary.h ${TEST_SOURCES}) +aktualizr_source_file_checks(${SOURCES} ${HEADERS} ${TEST_SOURCES}) diff --git a/src/libaktualizr/uptane/director_test.cc b/src/libaktualizr/uptane/director_test.cc index 116a1ae928..aec7e8505d 100644 --- a/src/libaktualizr/uptane/director_test.cc +++ b/src/libaktualizr/uptane/director_test.cc @@ -1,6 +1,7 @@ #include #include "directorrepository.h" +#include "logging/logging.h" #include "test_utils.h" #include "utilities/utils.h" @@ -9,8 +10,8 @@ boost::filesystem::path uptane_generator_path; namespace Uptane { /* - * Verify that we correctly persist non-empty targets metadata after receiving - * subsequent targets metadata that is empty. + * Verify that we correctly persist non-empty Targets metadata after receiving + * subsequent Targets metadata that is empty. 
*/ TEST(Director, EmptyTargets) { TemporaryDirectory meta_dir; @@ -19,9 +20,10 @@ TEST(Director, EmptyTargets) { uptane_gen.run({"generate", "--path", meta_dir.PathString()}); DirectorRepository director; - EXPECT_TRUE(director.initRoot(Utils::readFile(meta_dir.Path() / "repo/director/root.json"))); + EXPECT_NO_THROW(director.initRoot(Uptane::RepositoryType(Uptane::RepositoryType::DIRECTOR), + Utils::readFile(meta_dir.Path() / "repo/director/root.json"))); - EXPECT_TRUE(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); + EXPECT_NO_THROW(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); EXPECT_TRUE(director.targets.targets.empty()); EXPECT_TRUE(director.latest_targets.targets.empty()); @@ -31,7 +33,7 @@ TEST(Director, EmptyTargets) { "--serial", "CA:FE:A6:D2:84:9D"}); uptane_gen.run({"signtargets", "--path", meta_dir.PathString()}); - EXPECT_TRUE(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); + EXPECT_NO_THROW(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); EXPECT_EQ(director.targets.targets.size(), 1); EXPECT_EQ(director.targets.targets[0].filename(), "firmware.txt"); EXPECT_EQ(director.targets.targets.size(), director.latest_targets.targets.size()); @@ -39,7 +41,7 @@ TEST(Director, EmptyTargets) { uptane_gen.run({"emptytargets", "--path", meta_dir.PathString()}); uptane_gen.run({"signtargets", "--path", meta_dir.PathString(), "--correlationid", "abc123"}); - EXPECT_TRUE(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); + EXPECT_NO_THROW(director.verifyTargets(Utils::readFile(meta_dir.Path() / "repo/director/targets.json"))); EXPECT_EQ(director.targets.targets.size(), 1); EXPECT_EQ(director.targets.targets[0].filename(), "firmware.txt"); EXPECT_TRUE(director.latest_targets.targets.empty()); diff --git a/src/libaktualizr/uptane/directorrepository.cc 
b/src/libaktualizr/uptane/directorrepository.cc index c1291a9e90..e4dbd31688 100644 --- a/src/libaktualizr/uptane/directorrepository.cc +++ b/src/libaktualizr/uptane/directorrepository.cc @@ -1,5 +1,11 @@ #include "directorrepository.h" +#include "fetcher.h" +#include "logging/logging.h" +#include "storage/invstorage.h" +#include "uptane/exceptions.h" +#include "utilities/utils.h" + namespace Uptane { void DirectorRepository::resetMeta() { @@ -8,7 +14,32 @@ void DirectorRepository::resetMeta() { latest_targets = Targets(); } -bool DirectorRepository::targetsExpired() const { return latest_targets.isExpired(TimeStamp::Now()); } +void DirectorRepository::checkTargetsExpired() { + if (latest_targets.isExpired(TimeStamp::Now())) { + throw Uptane::ExpiredMetadata(type.ToString(), Role::TARGETS); + } +} + +void DirectorRepository::targetsSanityCheck() { + // 5.4.4.6.6. If checking Targets metadata from the Director repository, + // verify that there are no delegations. + if (!latest_targets.delegated_role_names_.empty()) { + throw Uptane::InvalidMetadata(type.ToString(), Role::TARGETS, "Found unexpected delegation."); + } + // 5.4.4.6.7. If checking Targets metadata from the Director repository, + // check that no ECU identifier is represented more than once. 
+ std::set ecu_ids; + for (const auto& target : targets.targets) { + for (const auto& ecu : target.ecus()) { + if (ecu_ids.find(ecu.first) == ecu_ids.end()) { + ecu_ids.insert(ecu.first); + } else { + LOG_ERROR << "ECU " << ecu.first << " appears twice in Director's Targets"; + throw Uptane::InvalidMetadata(type.ToString(), Role::TARGETS, "Found repeated ECU ID."); + } + } + } +} bool DirectorRepository::usePreviousTargets() const { // Don't store the new targets if they are empty and we've previously received @@ -16,7 +47,7 @@ bool DirectorRepository::usePreviousTargets() const { return !targets.targets.empty() && latest_targets.targets.empty(); } -bool DirectorRepository::verifyTargets(const std::string& targets_raw) { +void DirectorRepository::verifyTargets(const std::string& targets_raw) { try { // Verify the signature: latest_targets = Targets(RepositoryType::Director(), Role::Targets(), Utils::parseJSON(targets_raw), @@ -25,28 +56,24 @@ bool DirectorRepository::verifyTargets(const std::string& targets_raw) { targets = latest_targets; } } catch (const Uptane::Exception& e) { - LOG_ERROR << "Signature verification for director targets metadata failed"; - last_exception = e; - return false; + LOG_ERROR << "Signature verification for Director Targets metadata failed"; + throw; } - return true; } -bool DirectorRepository::checkMetaOffline(INvStorage& storage) { +void DirectorRepository::checkMetaOffline(INvStorage& storage) { resetMeta(); // Load Director Root Metadata { std::string director_root; if (!storage.loadLatestRoot(&director_root, RepositoryType::Director())) { - return false; + throw Uptane::SecurityException(RepositoryType::DIRECTOR, "Could not load latest root"); } - if (!initRoot(director_root)) { - return false; - } + initRoot(RepositoryType(RepositoryType::DIRECTOR), director_root); if (rootExpired()) { - return false; + throw Uptane::ExpiredMetadata(RepositoryType::DIRECTOR, Role::ROOT); } } @@ -55,103 +82,96 @@ bool 
DirectorRepository::checkMetaOffline(INvStorage& storage) { std::string director_targets; if (!storage.loadNonRoot(&director_targets, RepositoryType::Director(), Role::Targets())) { - return false; + throw Uptane::SecurityException(RepositoryType::DIRECTOR, "Could not load Targets role"); } - if (!verifyTargets(director_targets)) { - return false; - } + verifyTargets(director_targets); - if (targetsExpired()) { - return false; - } - } + checkTargetsExpired(); - return true; + targetsSanityCheck(); + } } -bool DirectorRepository::updateMeta(INvStorage& storage, Fetcher& fetcher) { +void DirectorRepository::updateMeta(INvStorage& storage, const IMetadataFetcher& fetcher) { // Uptane step 2 (download time) is not implemented yet. // Uptane step 3 (download metadata) - // reset director repo to initial state before starting UPTANE iteration + // reset Director repo to initial state before starting Uptane iteration resetMeta(); - // Load Initial Director Root Metadata - { - std::string director_root; - if (storage.loadLatestRoot(&director_root, RepositoryType::Director())) { - if (!initRoot(director_root)) { - return false; - } - } else { - if (!fetcher.fetchRole(&director_root, kMaxRootSize, RepositoryType::Director(), Role::Root(), Version(1))) { - return false; - } - if (!initRoot(director_root)) { - return false; - } - storage.storeRoot(director_root, RepositoryType::Director(), Version(1)); - } - } - // Update Director Root Metadata - { - std::string director_root; - if (!fetcher.fetchLatestRole(&director_root, kMaxRootSize, RepositoryType::Director(), Role::Root())) { - return false; - } - int remote_version = extractVersionUntrusted(director_root); - int local_version = rootVersion(); - - for (int version = local_version + 1; version <= remote_version; ++version) { - if (!fetcher.fetchRole(&director_root, kMaxRootSize, RepositoryType::Director(), Role::Root(), - Version(version))) { - return false; - } + updateRoot(storage, fetcher, RepositoryType::Director()); - 
if (!verifyRoot(director_root)) { - return false; - } - storage.storeRoot(director_root, RepositoryType::Director(), Version(version)); - storage.clearNonRootMeta(RepositoryType::Director()); - } + // Not supported: 3. Download and check the Timestamp metadata file from the Director repository, following the + // procedure in Section 5.4.4.4. Not supported: 4. Download and check the Snapshot metadata file from the Director + // repository, following the procedure in Section 5.4.4.5. - if (rootExpired()) { - return false; - } - } // Update Director Targets Metadata { std::string director_targets; - if (!fetcher.fetchLatestRole(&director_targets, kMaxDirectorTargetsSize, RepositoryType::Director(), - Role::Targets())) { - return false; - } + fetcher.fetchLatestRole(&director_targets, kMaxDirectorTargetsSize, RepositoryType::Director(), Role::Targets()); int remote_version = extractVersionUntrusted(director_targets); int local_version; std::string director_targets_stored; if (storage.loadNonRoot(&director_targets_stored, RepositoryType::Director(), Role::Targets())) { local_version = extractVersionUntrusted(director_targets_stored); - if (!verifyTargets(director_targets_stored)) { - LOG_WARNING << "Unable to verify stored director targets metadata."; + try { + verifyTargets(director_targets_stored); + } catch (const std::exception& e) { + LOG_WARNING << "Unable to verify stored Director Targets metadata."; } } else { local_version = -1; } - if (!verifyTargets(director_targets)) { - return false; - } + verifyTargets(director_targets); + // TODO(OTA-4940): check if versions are equal but content is different. In + // that case, the member variable targets is updated, but it isn't stored in + // the database, which can cause some minor confusion. 
if (local_version > remote_version) { - return false; + throw Uptane::SecurityException(RepositoryType::DIRECTOR, "Rollback attempt"); } else if (local_version < remote_version && !usePreviousTargets()) { storage.storeNonRoot(director_targets, RepositoryType::Director(), Role::Targets()); } - if (targetsExpired()) { + checkTargetsExpired(); + + targetsSanityCheck(); + } +} + +void DirectorRepository::dropTargets(INvStorage& storage) { + try { + storage.clearNonRootMeta(RepositoryType::Director()); + resetMeta(); + } catch (const Uptane::Exception& ex) { + LOG_ERROR << "Failed to reset Director Targets metadata: " << ex.what(); + } +} + +bool DirectorRepository::matchTargetsWithImageTargets( + const std::shared_ptr& image_targets) const { + // step 10 of https://uptane.github.io/papers/ieee-isto-6100.1.0.0.uptane-standard.html#rfc.section.5.4.4.2 + // TODO(OTA-4800): support delegations. Consider reusing findTargetInDelegationTree(), + // but it would need to be moved into a common place to be reused by Primary and Secondary. + // Currently this is only used by aktualizr-secondary, but according to the + // Standard, "A Secondary ECU MAY elect to perform this check only on the + // metadata for the image it will install". 
+ if (image_targets == nullptr) { + return false; + } + const auto& image_target_array = image_targets->targets; + const auto& director_target_array = targets.targets; + + for (const auto& director_target : director_target_array) { + auto found_it = std::find_if( + image_target_array.begin(), image_target_array.end(), + [&director_target](const Target& image_target) { return director_target.MatchTarget(image_target); }); + + if (found_it == image_target_array.end()) { return false; } } @@ -159,9 +179,4 @@ bool DirectorRepository::updateMeta(INvStorage& storage, Fetcher& fetcher) { return true; } -void DirectorRepository::dropTargets(INvStorage& storage) { - storage.clearNonRootMeta(RepositoryType::Director()); - resetMeta(); -} - } // namespace Uptane diff --git a/src/libaktualizr/uptane/directorrepository.h b/src/libaktualizr/uptane/directorrepository.h index 51f6b1ceac..523a3ed2c6 100644 --- a/src/libaktualizr/uptane/directorrepository.h +++ b/src/libaktualizr/uptane/directorrepository.h @@ -3,38 +3,43 @@ #include "gtest/gtest_prod.h" -#include "fetcher.h" #include "uptanerepository.h" namespace Uptane { -/* Director repository encapsulates state of metadata verification process. Subsequent verificaton steps rely on +/* Director repository encapsulates state of metadata verification process. Subsequent verification steps rely on * previous ones. 
*/ class DirectorRepository : public RepositoryCommon { public: DirectorRepository() : RepositoryCommon(RepositoryType::Director()) {} - void resetMeta(); - bool verifyTargets(const std::string& targets_raw); - const std::vector& getTargets() const { return targets.targets; } + void verifyTargets(const std::string& targets_raw); + const Targets& getTargets() const { return targets; } + std::vector getTargets(const Uptane::EcuSerial& ecu_id, + const Uptane::HardwareIdentifier& hw_id) const { + return targets.getTargets(ecu_id, hw_id); + } const std::string& getCorrelationId() const { return targets.correlation_id(); } - bool targetsExpired() const; - bool usePreviousTargets() const; - bool checkMetaOffline(INvStorage& storage); + void checkMetaOffline(INvStorage& storage); void dropTargets(INvStorage& storage); - Exception getLastException() const { return last_exception; } - bool updateMeta(INvStorage& storage, Fetcher& fetcher); + void updateMeta(INvStorage& storage, const IMetadataFetcher& fetcher) override; + bool matchTargetsWithImageTargets(const std::shared_ptr& image_targets) const; private: FRIEND_TEST(Director, EmptyTargets); + + void resetMeta(); + void checkTargetsExpired(); + void targetsSanityCheck(); + bool usePreviousTargets() const; + // Since the Director can send us an empty targets list to mean "no new // updates", we have to persist the previous targets list. Use the latest for // checking expiration but the most recent non-empty list for everything else. Uptane::Targets targets; // Only empty if we've never received non-empty targets. Uptane::Targets latest_targets; // Can be an empty list. 
- Exception last_exception{"", ""}; }; } // namespace Uptane diff --git a/src/libaktualizr/uptane/exceptions.h b/src/libaktualizr/uptane/exceptions.h index 1fbd61ea80..e960ca6f05 100644 --- a/src/libaktualizr/uptane/exceptions.h +++ b/src/libaktualizr/uptane/exceptions.h @@ -11,127 +11,127 @@ class Exception : public std::logic_error { public: Exception(std::string reponame, const std::string& what_arg) : std::logic_error(what_arg.c_str()), reponame_(std::move(reponame)) {} - ~Exception() noexcept override = default; virtual std::string getName() const { return reponame_; }; protected: std::string reponame_; }; +class MetadataFetchFailure : public Exception { + public: + MetadataFetchFailure(const std::string& reponame, const std::string& role) + : Exception(reponame, std::string("Failed to fetch role ") + role + " in " + reponame + " repository.") {} +}; + class SecurityException : public Exception { public: SecurityException(const std::string& reponame, const std::string& what_arg) : Exception(reponame, what_arg) {} - ~SecurityException() noexcept override = default; +}; + +class TargetContentMismatch : public Exception { + public: + explicit TargetContentMismatch(const std::string& targetname) + : Exception(targetname, "Director Target filename matches currently installed version, but content differs.") {} }; class TargetHashMismatch : public Exception { public: explicit TargetHashMismatch(const std::string& targetname) : Exception(targetname, "The target's calculated hash did not match the hash in the metadata.") {} - ~TargetHashMismatch() noexcept override = default; }; class OversizedTarget : public Exception { public: explicit OversizedTarget(const std::string& reponame) : Exception(reponame, "The target's size was greater than the size in the metadata.") {} - ~OversizedTarget() noexcept override = default; }; class IllegalThreshold : public Exception { public: IllegalThreshold(const std::string& reponame, const std::string& what_arg) : Exception(reponame, 
what_arg) {} - ~IllegalThreshold() noexcept override = default; }; class MissingRepo : public Exception { public: explicit MissingRepo(const std::string& reponame) : Exception(reponame, "The " + reponame + " repo is missing.") {} - ~MissingRepo() noexcept override = default; }; class UnmetThreshold : public Exception { public: UnmetThreshold(const std::string& reponame, const std::string& role) : Exception(reponame, "The " + role + " metadata had an unmet threshold.") {} - ~UnmetThreshold() noexcept override = default; }; class ExpiredMetadata : public Exception { public: ExpiredMetadata(const std::string& reponame, const std::string& role) : Exception(reponame, "The " + role + " metadata was expired.") {} - ~ExpiredMetadata() noexcept override = default; }; class InvalidMetadata : public Exception { public: InvalidMetadata(const std::string& reponame, const std::string& role, const std::string& reason) - : Exception(reponame, "The " + role + " metadata failed to parse:" + reason) {} - ~InvalidMetadata() noexcept override = default; + : Exception(reponame, "The " + role + " metadata failed to parse: " + reason) {} }; class TargetMismatch : public Exception { public: explicit TargetMismatch(const std::string& targetname) - : Exception(targetname, "The target metadata in image and director do not match.") {} - ~TargetMismatch() noexcept override = default; + : Exception(targetname, "The target metadata in the Image and Director repos do not match.") {} }; class NonUniqueSignatures : public Exception { public: NonUniqueSignatures(const std::string& reponame, const std::string& role) : Exception(reponame, "The role " + role + " had non-unique signatures.") {} - ~NonUniqueSignatures() noexcept override = default; }; class BadKeyId : public Exception { public: - BadKeyId(const std::string& reponame) : Exception(reponame, "A key has an incorrect associated key ID") {} - ~BadKeyId() noexcept override = default; + explicit BadKeyId(const std::string& reponame) : 
Exception(reponame, "A key has an incorrect associated key ID") {} }; class BadEcuId : public Exception { public: - BadEcuId(const std::string& reponame) - : Exception(reponame, "The target had an ECU ID that did not match the client's configured ECU id.") {} - ~BadEcuId() noexcept override = default; + explicit BadEcuId(const std::string& reponame) + : Exception(reponame, "The target had an ECU ID that did not match the client's configured ECU ID.") {} }; class BadHardwareId : public Exception { public: - BadHardwareId(const std::string& reponame) - : Exception(reponame, "The target had a hardware ID that did not match the client's configured hardware id.") {} - ~BadHardwareId() noexcept override = default; + explicit BadHardwareId(const std::string& reponame) + : Exception(reponame, "The target had a hardware ID that did not match the client's configured hardware ID.") {} +}; + +class RootRotationError : public Exception { + public: + explicit RootRotationError(const std::string& reponame) + : Exception(reponame, "Version in Root metadata does not match its expected value.") {} }; class VersionMismatch : public Exception { public: VersionMismatch(const std::string& reponame, const std::string& role) : Exception(reponame, "The version of role " + role + " does not match the entry in Snapshot metadata.") {} - ~VersionMismatch() noexcept override = default; }; class DelegationHashMismatch : public Exception { public: explicit DelegationHashMismatch(const std::string& delegation_name) - : Exception("images", "The calculated hash of delegated role " + delegation_name + - " did not match the hash in the metadata.") {} - ~DelegationHashMismatch() noexcept override = default; + : Exception("image", "The calculated hash of delegated role " + delegation_name + + " did not match the hash in the metadata.") {} }; class DelegationMissing : public Exception { public: explicit DelegationMissing(const std::string& delegation_name) - : Exception("images", "The delegated role " + 
delegation_name + " is missing.") {} - ~DelegationMissing() noexcept override = default; + : Exception("image", "The delegated role " + delegation_name + " is missing.") {} }; class InvalidTarget : public Exception { public: - InvalidTarget(const std::string& reponame) + explicit InvalidTarget(const std::string& reponame) : Exception(reponame, "The target had a non-OSTree package that can not be installed on an OSTree system.") {} - ~InvalidTarget() noexcept override = default; }; } // namespace Uptane diff --git a/src/libaktualizr/uptane/fetcher.cc b/src/libaktualizr/uptane/fetcher.cc index 749c0250d2..51c31461ed 100644 --- a/src/libaktualizr/uptane/fetcher.cc +++ b/src/libaktualizr/uptane/fetcher.cc @@ -1,21 +1,21 @@ #include "fetcher.h" +#include "uptane/exceptions.h" + namespace Uptane { -bool Fetcher::fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Uptane::Role& role, - Version version) { - // TODO: chain-loading root.json - std::string url = (repo == RepositoryType::Director()) ? config.uptane.director_server : config.uptane.repo_server; +void Fetcher::fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Uptane::Role& role, + Version version) const { + std::string url = (repo == RepositoryType::Director()) ? 
director_server : repo_server; if (role.IsDelegation()) { url += "/delegations"; } url += "/" + version.RoleFileName(role); HttpResponse response = http->get(url, maxsize); if (!response.isOk()) { - return false; + throw Uptane::MetadataFetchFailure(repo.ToString(), role.ToString()); } *result = response.body; - return true; } } // namespace Uptane diff --git a/src/libaktualizr/uptane/fetcher.h b/src/libaktualizr/uptane/fetcher.h index 48c8cb025f..f5ebba2d0f 100644 --- a/src/libaktualizr/uptane/fetcher.h +++ b/src/libaktualizr/uptane/fetcher.h @@ -1,9 +1,9 @@ #ifndef UPTANE_FETCHER_H_ #define UPTANE_FETCHER_H_ -#include "config/config.h" #include "http/httpinterface.h" -#include "storage/invstorage.h" +#include "libaktualizr/config.h" +#include "tuf.h" namespace Uptane { @@ -11,22 +11,46 @@ constexpr int64_t kMaxRootSize = 64 * 1024; constexpr int64_t kMaxDirectorTargetsSize = 64 * 1024; constexpr int64_t kMaxTimestampSize = 64 * 1024; constexpr int64_t kMaxSnapshotSize = 64 * 1024; -constexpr int64_t kMaxImagesTargetsSize = 1024 * 1024; +constexpr int64_t kMaxImageTargetsSize = 8 * 1024 * 1024; -class Fetcher { +class IMetadataFetcher { + public: + IMetadataFetcher(const IMetadataFetcher&) = delete; + IMetadataFetcher& operator=(const IMetadataFetcher&) = delete; + IMetadataFetcher& operator=(IMetadataFetcher&&) = delete; + virtual ~IMetadataFetcher() = default; + + virtual void fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Uptane::Role& role, + Version version) const = 0; + virtual void fetchLatestRole(std::string* result, int64_t maxsize, RepositoryType repo, + const Uptane::Role& role) const = 0; + + protected: + IMetadataFetcher() = default; + IMetadataFetcher(IMetadataFetcher&&) = default; +}; + +class Fetcher : public IMetadataFetcher { public: Fetcher(const Config& config_in, std::shared_ptr http_in) - : http(std::move(http_in)), config(config_in) {} - bool fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, 
const Uptane::Role& role, Version version); - bool fetchLatestRole(std::string* result, int64_t maxsize, RepositoryType repo, const Uptane::Role& role) { - return fetchRole(result, maxsize, repo, role, Version()); + : Fetcher(config_in.uptane.repo_server, config_in.uptane.director_server, std::move(http_in)) {} + Fetcher(std::string repo_server_in, std::string director_server_in, std::shared_ptr http_in) + : http(std::move(http_in)), + repo_server(std::move(repo_server_in)), + director_server(std::move(director_server_in)) {} + void fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Uptane::Role& role, + Version version) const override; + void fetchLatestRole(std::string* result, int64_t maxsize, RepositoryType repo, + const Uptane::Role& role) const override { + fetchRole(result, maxsize, repo, role, Version()); } - std::string getRepoServer() const { return config.uptane.repo_server; } + std::string getRepoServer() const { return repo_server; } private: std::shared_ptr http; - const Config& config; + std::string repo_server; + std::string director_server; }; } // namespace Uptane diff --git a/src/libaktualizr/uptane/imagerepository.cc b/src/libaktualizr/uptane/imagerepository.cc new file mode 100644 index 0000000000..99ad520af4 --- /dev/null +++ b/src/libaktualizr/uptane/imagerepository.cc @@ -0,0 +1,381 @@ +#include "imagerepository.h" + +#include "crypto/crypto.h" +#include "fetcher.h" +#include "logging/logging.h" +#include "storage/invstorage.h" +#include "uptane/exceptions.h" + +namespace Uptane { + +void ImageRepository::resetMeta() { + resetRoot(); + targets.reset(); + snapshot = Snapshot(); + timestamp = TimestampMeta(); +} + +void ImageRepository::verifyTimestamp(const std::string& timestamp_raw) { + try { + // Verify the signature: + timestamp = + TimestampMeta(RepositoryType::Image(), Utils::parseJSON(timestamp_raw), std::make_shared(root)); + } catch (const Exception& e) { + LOG_ERROR << "Signature verification for Timestamp 
metadata failed"; + throw; + } +} + +void ImageRepository::checkTimestampExpired() { + if (timestamp.isExpired(TimeStamp::Now())) { + throw Uptane::ExpiredMetadata(type.ToString(), Role::TIMESTAMP); + } +} + +void ImageRepository::fetchSnapshot(INvStorage& storage, const IMetadataFetcher& fetcher, const int local_version) { + std::string image_snapshot; + const int64_t snapshot_size = (snapshotSize() > 0) ? snapshotSize() : kMaxSnapshotSize; + fetcher.fetchLatestRole(&image_snapshot, snapshot_size, RepositoryType::Image(), Role::Snapshot()); + const int remote_version = extractVersionUntrusted(image_snapshot); + + // 6. Check that each Targets metadata filename listed in the previous Snapshot metadata file is also listed in this + // Snapshot metadata file. If this condition is not met, discard the new Snapshot metadata file, abort the update + // cycle, and report the failure. (Checks for a rollback attack.) + // See also https://github.com/uptane/deployment-considerations/pull/39/files. + // If the Snapshot is rotated, delegations may be safely removed. 
+ // https://saeljira.it.here.com/browse/OTA-4121 + verifySnapshot(image_snapshot, false); + + if (local_version > remote_version) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Rollback attempt"); + } else { + storage.storeNonRoot(image_snapshot, RepositoryType::Image(), Role::Snapshot()); + } +} + +void ImageRepository::verifySnapshot(const std::string& snapshot_raw, bool prefetch) { + const std::string canonical = Utils::jsonToCanonicalStr(Utils::parseJSON(snapshot_raw)); + bool hash_exists = false; + for (const auto& it : timestamp.snapshot_hashes()) { + switch (it.type()) { + case Hash::Type::kSha256: + if (Hash(Hash::Type::kSha256, Crypto::sha256digestHex(canonical)) != it) { + if (!prefetch) { + LOG_ERROR << "Hash verification for Snapshot metadata failed"; + } + throw Uptane::SecurityException(RepositoryType::IMAGE, "Snapshot metadata hash verification failed"); + } + hash_exists = true; + break; + case Hash::Type::kSha512: + if (Hash(Hash::Type::kSha512, Crypto::sha512digestHex(canonical)) != it) { + if (!prefetch) { + LOG_ERROR << "Hash verification for Snapshot metadata failed"; + } + throw Uptane::SecurityException(RepositoryType::IMAGE, "Snapshot metadata hash verification failed"); + } + hash_exists = true; + break; + default: + break; + } + } + + if (!hash_exists) { + LOG_ERROR << "No hash found for shapshot.json"; + throw Uptane::SecurityException(RepositoryType::IMAGE, "Snapshot metadata hash verification failed"); + } + + try { + // Verify the signature: + snapshot = Snapshot(RepositoryType::Image(), Utils::parseJSON(snapshot_raw), std::make_shared(root)); + } catch (const Exception& e) { + LOG_ERROR << "Signature verification for Snapshot metadata failed"; + throw; + } + + if (snapshot.version() != timestamp.snapshot_version()) { + throw Uptane::VersionMismatch(RepositoryType::IMAGE, Uptane::Role::SNAPSHOT); + } +} + +void ImageRepository::checkSnapshotExpired() { + if (snapshot.isExpired(TimeStamp::Now())) { + throw 
Uptane::ExpiredMetadata(type.ToString(), Role::SNAPSHOT); + } +} + +void ImageRepository::fetchTargets(INvStorage& storage, const IMetadataFetcher& fetcher, const int local_version) { + std::string image_targets; + const Role targets_role = Role::Targets(); + + auto targets_size = getRoleSize(Role::Targets()); + if (targets_size <= 0) { + targets_size = kMaxImageTargetsSize; + } + + fetcher.fetchLatestRole(&image_targets, targets_size, RepositoryType::Image(), targets_role); + + const int remote_version = extractVersionUntrusted(image_targets); + + verifyTargets(image_targets, false, false); + + if (local_version > remote_version) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Rollback attempt"); + } else { + storage.storeNonRoot(image_targets, RepositoryType::Image(), targets_role); + } +} + +void ImageRepository::verifyRoleHashes(const std::string& role_data, const Uptane::Role& role, bool prefetch) const { + const std::string canonical = Utils::jsonToCanonicalStr(Utils::parseJSON(role_data)); + // Hashes are not required. If present, however, we may as well check them. + // This provides no security benefit, but may help with fault detection. 
+ for (const auto& it : snapshot.role_hashes(role)) { + switch (it.type()) { + case Hash::Type::kSha256: + if (Hash(Hash::Type::kSha256, Crypto::sha256digestHex(canonical)) != it) { + if (!prefetch) { + LOG_ERROR << "Hash verification for " << role << " metadata failed"; + } + throw Uptane::SecurityException(RepositoryType::IMAGE, "Hash metadata mismatch"); + } + break; + case Hash::Type::kSha512: + if (Hash(Hash::Type::kSha512, Crypto::sha512digestHex(canonical)) != it) { + if (!prefetch) { + LOG_ERROR << "Hash verification for " << role << " metadata failed"; + } + throw Uptane::SecurityException(RepositoryType::IMAGE, "Hash metadata mismatch"); + } + break; + default: + break; + } + } +} + +int ImageRepository::getRoleVersion(const Uptane::Role& role) const { return snapshot.role_version(role); } + +int64_t ImageRepository::getRoleSize(const Uptane::Role& role) const { return snapshot.role_size(role); } + +void ImageRepository::verifyTargets(const std::string& targets_raw, bool prefetch, bool hash_change_expected) { + try { + verifyRoleHashes(targets_raw, Uptane::Role::Targets(), prefetch); + + auto targets_json = Utils::parseJSON(targets_raw); + + // Verify the signature: + auto signer = std::make_shared(root); + targets = std::make_shared( + Targets(RepositoryType::Image(), Uptane::Role::Targets(), targets_json, signer)); + + if (targets->version() != snapshot.role_version(Uptane::Role::Targets())) { + throw Uptane::VersionMismatch(RepositoryType::IMAGE, Uptane::Role::TARGETS); + } + } catch (const Uptane::SecurityException& e) { + if (hash_change_expected) { + LOG_DEBUG << "Signature verification for Image repo Targets metadata failed: " << e.what(); + } else { + LOG_ERROR << "Signature verification for Image repo Targets metadata failed: " << e.what(); + } + throw; + } catch (const Exception& e) { + LOG_ERROR << "Signature verification for Image repo Targets metadata failed: " << e.what(); + throw; + } +} + +std::shared_ptr 
ImageRepository::verifyDelegation(const std::string& delegation_raw, + const Uptane::Role& role, + const Targets& parent_target) { + try { + const Json::Value delegation_json = Utils::parseJSON(delegation_raw); + const std::string canonical = Utils::jsonToCanonicalStr(delegation_json); + + // Verify the signature: + auto signer = std::make_shared(parent_target); + return std::make_shared(Targets(RepositoryType::Image(), role, delegation_json, signer)); + } catch (const Exception& e) { + LOG_ERROR << "Signature verification for Image repo delegated Targets metadata failed"; + throw; + } + + return std::shared_ptr(nullptr); +} + +void ImageRepository::checkTargetsExpired() { + if (targets->isExpired(TimeStamp::Now())) { + throw Uptane::ExpiredMetadata(type.ToString(), Role::TARGETS); + } +} + +void ImageRepository::updateRoot(INvStorage& storage, const IMetadataFetcher& fetcher) { + resetMeta(); + RepositoryCommon::updateRoot(storage, fetcher, RepositoryType::Image()); +} + +void ImageRepository::updateMeta(INvStorage& storage, const IMetadataFetcher& fetcher) { + const auto timestamp_stored_signature{timestamp.isInitialized() ? 
timestamp.signature() : ""}; + bool snapshot_updated = false; + auto prev_timestamp = timestamp; + + updateRoot(storage, fetcher); + + // Update Image repo Timestamp metadata + { + std::string image_timestamp; + + fetcher.fetchLatestRole(&image_timestamp, kMaxTimestampSize, RepositoryType::Image(), Role::Timestamp()); + int remote_version = extractVersionUntrusted(image_timestamp); + + int local_version; + std::string image_timestamp_stored; + if (storage.loadNonRoot(&image_timestamp_stored, RepositoryType::Image(), Role::Timestamp())) { + local_version = extractVersionUntrusted(image_timestamp_stored); + } else { + local_version = -1; + } + + verifyTimestamp(image_timestamp); + + if (local_version > remote_version) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Rollback attempt"); + } else if (local_version < remote_version || timestamp_stored_signature != timestamp.signature()) { + // If local and remote versions are the same but their content actually differ then store/update the metadata in + // DB We assume that the metadata contains just one signature, otherwise the comparison might not always work + // correctly. + storage.storeNonRoot(image_timestamp, RepositoryType::Image(), Role::Timestamp()); + } + + checkTimestampExpired(); + } + + // Update Image repo Snapshot metadata + { + // First check if we already have the latest version according to the + // Timestamp metadata. 
+ bool fetch_snapshot = true; + int local_version; + std::string image_snapshot_stored; + if (storage.loadNonRoot(&image_snapshot_stored, RepositoryType::Image(), Role::Snapshot())) { + try { + verifySnapshot(image_snapshot_stored, true); + fetch_snapshot = false; + LOG_DEBUG << "Skipping Image repo Snapshot download; stored version is still current."; + } catch (const Uptane::SecurityException& e) { + if (!prev_timestamp.isInitialized() || prev_timestamp.snapshot_hashes() != timestamp.snapshot_hashes()) { + // There were updates in the snapshot hashes within the timestamp meta, so we expect the current snapshot to + // be invalid, and there is no need to report an error + LOG_DEBUG << "Image repo Snapshot verification failed: " << e.what(); + } else { + LOG_ERROR << "Image repo Snapshot verification failed: " << e.what(); + } + } catch (const Uptane::Exception& e) { + LOG_ERROR << "Image repo Snapshot verification failed: " << e.what(); + } + local_version = snapshot.version(); + } else { + local_version = -1; + } + + // If we don't, attempt to fetch the latest. + if (fetch_snapshot) { + fetchSnapshot(storage, fetcher, local_version); + snapshot_updated = true; + } + + checkSnapshotExpired(); + } + + // Update Image repo Targets metadata + { + // First check if we already have the latest version according to the + // Snapshot metadata. 
+ bool fetch_targets = true; + int local_version = -1; + std::string image_targets_stored; + if (storage.loadNonRoot(&image_targets_stored, RepositoryType::Image(), Role::Targets())) { + try { + verifyTargets(image_targets_stored, true, snapshot_updated); + fetch_targets = false; + LOG_DEBUG << "Skipping Image repo Targets download; stored version is still current."; + } catch (const Uptane::SecurityException& e) { + if (snapshot_updated) { + LOG_DEBUG << "Image repo Target verification failed: " << e.what(); + } else { + LOG_ERROR << "Image repo Target verification failed: " << e.what(); + } + } catch (const std::exception& e) { + LOG_ERROR << "Image repo Target verification failed: " << e.what(); + } + if (targets) { + local_version = targets->version(); + } + } + + // If we don't, attempt to fetch the latest. + if (fetch_targets) { + fetchTargets(storage, fetcher, local_version); + } + + checkTargetsExpired(); + } +} + +void ImageRepository::checkMetaOffline(INvStorage& storage) { + resetMeta(); + // Load Image repo Root metadata + { + std::string image_root; + if (!storage.loadLatestRoot(&image_root, RepositoryType::Image())) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Could not load latest root"); + } + + initRoot(RepositoryType(RepositoryType::IMAGE), image_root); + + if (rootExpired()) { + throw Uptane::ExpiredMetadata(RepositoryType::IMAGE, Role::Root().ToString()); + } + } + + // Load Image repo Timestamp metadata + { + std::string image_timestamp; + if (!storage.loadNonRoot(&image_timestamp, RepositoryType::Image(), Role::Timestamp())) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Could not load Timestamp role"); + } + + verifyTimestamp(image_timestamp); + + checkTimestampExpired(); + } + + // Load Image repo Snapshot metadata + { + std::string image_snapshot; + + if (!storage.loadNonRoot(&image_snapshot, RepositoryType::Image(), Role::Snapshot())) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Could not load 
Snapshot role"); + } + + verifySnapshot(image_snapshot, false); + + checkSnapshotExpired(); + } + + // Load Image repo Targets metadata + { + std::string image_targets; + Role targets_role = Uptane::Role::Targets(); + if (!storage.loadNonRoot(&image_targets, RepositoryType::Image(), targets_role)) { + throw Uptane::SecurityException(RepositoryType::IMAGE, "Could not load Image role"); + } + + verifyTargets(image_targets, false, false); + + checkTargetsExpired(); + } +} + +} // namespace Uptane diff --git a/src/libaktualizr/uptane/imagerepository.h b/src/libaktualizr/uptane/imagerepository.h new file mode 100644 index 0000000000..678239c506 --- /dev/null +++ b/src/libaktualizr/uptane/imagerepository.h @@ -0,0 +1,52 @@ +#ifndef IMAGE_REPOSITORY_H_ +#define IMAGE_REPOSITORY_H_ + +#include +#include + +#include "uptanerepository.h" + +namespace Uptane { + +constexpr int kDelegationsMaxDepth = 5; + +class ImageRepository : public RepositoryCommon { + public: + ImageRepository() : RepositoryCommon(RepositoryType::Image()) {} + + void resetMeta(); + + void verifyTargets(const std::string& targets_raw, bool prefetch, bool hash_change_expected); + + void verifyTimestamp(const std::string& timestamp_raw); + + void verifySnapshot(const std::string& snapshot_raw, bool prefetch); + + static std::shared_ptr verifyDelegation(const std::string& delegation_raw, const Uptane::Role& role, + const Targets& parent_target); + std::shared_ptr getTargets() const { return targets; } + + void verifyRoleHashes(const std::string& role_data, const Uptane::Role& role, bool prefetch) const; + int getRoleVersion(const Uptane::Role& role) const; + int64_t getRoleSize(const Uptane::Role& role) const; + + void checkMetaOffline(INvStorage& storage); + void updateRoot(INvStorage& storage, const IMetadataFetcher& fetcher); + void updateMeta(INvStorage& storage, const IMetadataFetcher& fetcher) override; + + private: + void checkTimestampExpired(); + void checkSnapshotExpired(); + int64_t snapshotSize() 
const { return timestamp.snapshot_size(); } + void fetchSnapshot(INvStorage& storage, const IMetadataFetcher& fetcher, int local_version); + void fetchTargets(INvStorage& storage, const IMetadataFetcher& fetcher, int local_version); + void checkTargetsExpired(); + + std::shared_ptr targets; + Uptane::TimestampMeta timestamp; + Uptane::Snapshot snapshot; +}; + +} // namespace Uptane + +#endif // IMAGE_REPOSITORY_H_ diff --git a/src/libaktualizr/uptane/imagesrepository.cc b/src/libaktualizr/uptane/imagesrepository.cc deleted file mode 100644 index 2e112f16f5..0000000000 --- a/src/libaktualizr/uptane/imagesrepository.cc +++ /dev/null @@ -1,359 +0,0 @@ -#include "imagesrepository.h" - -namespace Uptane { - -void ImagesRepository::resetMeta() { - resetRoot(); - targets.reset(); - snapshot = Snapshot(); - timestamp = TimestampMeta(); -} - -bool ImagesRepository::verifyTimestamp(const std::string& timestamp_raw) { - try { - // Verify the signature: - timestamp = - TimestampMeta(RepositoryType::Image(), Utils::parseJSON(timestamp_raw), std::make_shared(root)); - } catch (const Exception& e) { - LOG_ERROR << "Signature verification for timestamp metadata failed"; - last_exception = e; - return false; - } - return true; -} - -bool ImagesRepository::verifySnapshot(const std::string& snapshot_raw) { - try { - const std::string canonical = Utils::jsonToCanonicalStr(Utils::parseJSON(snapshot_raw)); - bool hash_exists = false; - for (const auto& it : timestamp.snapshot_hashes()) { - switch (it.type()) { - case Hash::Type::kSha256: - if (Hash(Hash::Type::kSha256, boost::algorithm::hex(Crypto::sha256digest(canonical))) != it) { - LOG_ERROR << "Hash verification for snapshot metadata failed"; - return false; - } - hash_exists = true; - break; - case Hash::Type::kSha512: - if (Hash(Hash::Type::kSha512, boost::algorithm::hex(Crypto::sha512digest(canonical))) != it) { - LOG_ERROR << "Hash verification for snapshot metadata failed"; - return false; - } - hash_exists = true; - break; - 
default: - break; - } - } - if (!hash_exists) { - LOG_ERROR << "No hash found for shapshot.json"; - return false; - } - // Verify the signature: - snapshot = Snapshot(RepositoryType::Image(), Utils::parseJSON(snapshot_raw), std::make_shared(root)); - if (snapshot.version() != timestamp.snapshot_version()) { - return false; - } - } catch (const Exception& e) { - LOG_ERROR << "Signature verification for snapshot metadata failed"; - last_exception = e; - return false; - } - return true; -} - -bool ImagesRepository::verifyRoleHashes(const std::string& role_data, const Uptane::Role& role) const { - const std::string canonical = Utils::jsonToCanonicalStr(Utils::parseJSON(role_data)); - // Hashes are not required. If present, however, we may as well check them. - // This provides no security benefit, but may help with fault detection. - for (const auto& it : snapshot.role_hashes(role)) { - switch (it.type()) { - case Hash::Type::kSha256: - if (Hash(Hash::Type::kSha256, boost::algorithm::hex(Crypto::sha256digest(canonical))) != it) { - LOG_ERROR << "Hash verification for " << role.ToString() << " metadata failed"; - return false; - } - break; - case Hash::Type::kSha512: - if (Hash(Hash::Type::kSha512, boost::algorithm::hex(Crypto::sha512digest(canonical))) != it) { - LOG_ERROR << "Hash verification for " << role.ToString() << " metadata failed"; - return false; - } - break; - default: - break; - } - } - - return true; -} - -int ImagesRepository::getRoleVersion(const Uptane::Role& role) const { return snapshot.role_version(role); } - -int64_t ImagesRepository::getRoleSize(const Uptane::Role& role) const { return snapshot.role_size(role); } - -bool ImagesRepository::verifyTargets(const std::string& targets_raw) { - try { - if (!verifyRoleHashes(targets_raw, Uptane::Role::Targets())) { - return false; - } - - auto targets_json = Utils::parseJSON(targets_raw); - - // Verify the signature: - auto signer = std::make_shared(root); - targets = std::make_shared( - 
Targets(RepositoryType::Image(), Uptane::Role::Targets(), targets_json, signer)); - - if (targets->version() != snapshot.role_version(Uptane::Role::Targets())) { - return false; - } - } catch (const Exception& e) { - LOG_ERROR << "Signature verification for images targets metadata failed"; - last_exception = e; - return false; - } - return true; -} - -std::shared_ptr ImagesRepository::verifyDelegation(const std::string& delegation_raw, - const Uptane::Role& role, - const Targets& parent_target) { - try { - const Json::Value delegation_json = Utils::parseJSON(delegation_raw); - const std::string canonical = Utils::jsonToCanonicalStr(delegation_json); - - // Verify the signature: - auto signer = std::make_shared(parent_target); - return std::make_shared(Targets(RepositoryType::Image(), role, delegation_json, signer)); - } catch (const Exception& e) { - LOG_ERROR << "Signature verification for images delegated targets metadata failed"; - throw e; - } - - return std::shared_ptr(nullptr); -} - -bool ImagesRepository::updateMeta(INvStorage& storage, Fetcher& fetcher) { - resetMeta(); - // Load Initial Images Root Metadata - { - std::string images_root; - if (storage.loadLatestRoot(&images_root, RepositoryType::Image())) { - if (!initRoot(images_root)) { - return false; - } - } else { - if (!fetcher.fetchRole(&images_root, kMaxRootSize, RepositoryType::Image(), Role::Root(), Version(1))) { - return false; - } - if (!initRoot(images_root)) { - return false; - } - storage.storeRoot(images_root, RepositoryType::Image(), Version(1)); - } - } - - // Update Image Root Metadata - { - std::string images_root; - if (!fetcher.fetchLatestRole(&images_root, kMaxRootSize, RepositoryType::Image(), Role::Root())) { - return false; - } - int remote_version = extractVersionUntrusted(images_root); - int local_version = rootVersion(); - - for (int version = local_version + 1; version <= remote_version; ++version) { - if (!fetcher.fetchRole(&images_root, kMaxRootSize, 
RepositoryType::Image(), Role::Root(), Version(version))) { - return false; - } - if (!verifyRoot(images_root)) { - return false; - } - storage.storeRoot(images_root, RepositoryType::Image(), Version(version)); - storage.clearNonRootMeta(RepositoryType::Image()); - } - - if (rootExpired()) { - return false; - } - } - - // Update Images Timestamp Metadata - { - std::string images_timestamp; - - if (!fetcher.fetchLatestRole(&images_timestamp, kMaxTimestampSize, RepositoryType::Image(), Role::Timestamp())) { - return false; - } - int remote_version = extractVersionUntrusted(images_timestamp); - - int local_version; - std::string images_timestamp_stored; - if (storage.loadNonRoot(&images_timestamp_stored, RepositoryType::Image(), Role::Timestamp())) { - local_version = extractVersionUntrusted(images_timestamp_stored); - } else { - local_version = -1; - } - - if (!verifyTimestamp(images_timestamp)) { - return false; - } - - if (local_version > remote_version) { - return false; - } else if (local_version < remote_version) { - storage.storeNonRoot(images_timestamp, RepositoryType::Image(), Role::Timestamp()); - } - - if (timestampExpired()) { - return false; - } - } - - // Update Images Snapshot Metadata - { - std::string images_snapshot; - - int64_t snapshot_size = (snapshotSize() > 0) ? 
snapshotSize() : kMaxSnapshotSize; - if (!fetcher.fetchLatestRole(&images_snapshot, snapshot_size, RepositoryType::Image(), Role::Snapshot())) { - return false; - } - int remote_version = extractVersionUntrusted(images_snapshot); - - int local_version; - std::string images_snapshot_stored; - if (storage.loadNonRoot(&images_snapshot_stored, RepositoryType::Image(), Role::Snapshot())) { - local_version = extractVersionUntrusted(images_snapshot_stored); - } else { - local_version = -1; - } - - if (!verifySnapshot(images_snapshot)) { - return false; - } - - if (local_version > remote_version) { - return false; - } else if (local_version < remote_version) { - storage.storeNonRoot(images_snapshot, RepositoryType::Image(), Role::Snapshot()); - } - - if (snapshotExpired()) { - return false; - } - } - - // Update Images Targets Metadata - { - std::string images_targets; - Role targets_role = Role::Targets(); - - auto targets_size = getRoleSize(Role::Targets()); - if (targets_size <= 0) { - targets_size = kMaxImagesTargetsSize; - } - if (!fetcher.fetchLatestRole(&images_targets, targets_size, RepositoryType::Image(), targets_role)) { - return false; - } - int remote_version = extractVersionUntrusted(images_targets); - - int local_version; - std::string images_targets_stored; - if (storage.loadNonRoot(&images_targets_stored, RepositoryType::Image(), targets_role)) { - local_version = extractVersionUntrusted(images_targets_stored); - } else { - local_version = -1; - } - - if (!verifyTargets(images_targets)) { - return false; - } - - if (local_version > remote_version) { - return false; - } else if (local_version < remote_version) { - storage.storeNonRoot(images_targets, RepositoryType::Image(), targets_role); - } - - if (targetsExpired()) { - return false; - } - } - - return true; -} - -bool ImagesRepository::checkMetaOffline(INvStorage& storage) { - resetMeta(); - // Load Images Root Metadata - { - std::string images_root; - if (!storage.loadLatestRoot(&images_root, 
RepositoryType::Image())) { - return false; - } - - if (!initRoot(images_root)) { - return false; - } - - if (rootExpired()) { - return false; - } - } - - // Load Images Timestamp Metadata - { - std::string images_timestamp; - if (!storage.loadNonRoot(&images_timestamp, RepositoryType::Image(), Role::Timestamp())) { - return false; - } - - if (!verifyTimestamp(images_timestamp)) { - return false; - } - - if (timestampExpired()) { - return false; - } - } - - // Load Images Snapshot Metadata - { - std::string images_snapshot; - - if (!storage.loadNonRoot(&images_snapshot, RepositoryType::Image(), Role::Snapshot())) { - return false; - } - - if (!verifySnapshot(images_snapshot)) { - return false; - } - - if (snapshotExpired()) { - return false; - } - } - - // Load Images Targets Metadata - { - std::string images_targets; - Role targets_role = Uptane::Role::Targets(); - if (!storage.loadNonRoot(&images_targets, RepositoryType::Image(), targets_role)) { - return false; - } - - if (!verifyTargets(images_targets)) { - return false; - } - - if (targetsExpired()) { - return false; - } - } - - return true; -} - -} // namespace Uptane diff --git a/src/libaktualizr/uptane/imagesrepository.h b/src/libaktualizr/uptane/imagesrepository.h deleted file mode 100644 index afcdb9750d..0000000000 --- a/src/libaktualizr/uptane/imagesrepository.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef IMAGES_REPOSITORY_H_ -#define IMAGES_REPOSITORY_H_ - -#include -#include - -#include "fetcher.h" -#include "uptanerepository.h" - -namespace Uptane { - -constexpr int kDelegationsMaxDepth = 5; - -class ImagesRepository : public RepositoryCommon { - public: - ImagesRepository() : RepositoryCommon(RepositoryType::Image()) {} - - void resetMeta(); - - bool verifyTargets(const std::string& targets_raw); - bool targetsExpired() { return targets->isExpired(TimeStamp::Now()); } - - bool verifyTimestamp(const std::string& timestamp_raw); - bool timestampExpired() { return timestamp.isExpired(TimeStamp::Now()); } - 
- bool verifySnapshot(const std::string& snapshot_raw); - bool snapshotExpired() { return snapshot.isExpired(TimeStamp::Now()); } - int64_t snapshotSize() { return timestamp.snapshot_size(); } - - Exception getLastException() const { return last_exception; } - - static std::shared_ptr verifyDelegation(const std::string& delegation_raw, const Uptane::Role& role, - const Targets& parent_target); - std::shared_ptr getTargets() const { return targets; } - - bool verifyRoleHashes(const std::string& role_data, const Uptane::Role& role) const; - int getRoleVersion(const Uptane::Role& role) const; - int64_t getRoleSize(const Uptane::Role& role) const; - - bool checkMetaOffline(INvStorage& storage); - bool updateMeta(INvStorage& storage, Fetcher& fetcher); - - private: - std::shared_ptr targets; - Uptane::TimestampMeta timestamp; - Uptane::Snapshot snapshot; - - Exception last_exception{"", ""}; -}; - -} // namespace Uptane - -#endif // IMAGES_REPOSITORY_H diff --git a/src/libaktualizr/uptane/isotpsecondary.cc b/src/libaktualizr/uptane/isotpsecondary.cc deleted file mode 100644 index 27c8db483d..0000000000 --- a/src/libaktualizr/uptane/isotpsecondary.cc +++ /dev/null @@ -1,171 +0,0 @@ -#include "isotpsecondary.h" - -#include -#include -#include -#include - -#include -#include -#include - -#define LIBUPTINY_ISOTP_PRIMARY_CANID 0x7D8 - -constexpr size_t kChunkSize = 500; - -enum class IsoTpUptaneMesType { - kGetSerial = 0x01, - kGetSerialResp = 0x41, - kGetHwId = 0x02, - kGetHwIdResp = 0x42, - kGetPkey = 0x03, - kGetPkeyResp = 0x43, - kGetRootVer = 0x04, - kGetRootVerResp = 0x44, - kGetManifest = 0x05, - kGetManifestResp = 0x45, - kPutRoot = 0x06, - kPutTargets = 0x07, - kPutImageChunk = 0x08, - kPutImageChunkAckErr = 0x48, -}; - -namespace Uptane { - -IsoTpSecondary::IsoTpSecondary(const std::string& can_iface, uint16_t can_id) - : conn(can_iface, LIBUPTINY_ISOTP_PRIMARY_CANID, can_id) {} - -EcuSerial IsoTpSecondary::getSerial() { - std::string out; - std::string in; - - out 
+= static_cast(IsoTpUptaneMesType::kGetSerial); - if (!conn.SendRecv(out, &in)) { - return EcuSerial::Unknown(); - } - - if (in[0] != static_cast(IsoTpUptaneMesType::kGetSerialResp)) { - return EcuSerial::Unknown(); - } - return EcuSerial(in.substr(1)); -} - -HardwareIdentifier IsoTpSecondary::getHwId() { - std::string out; - std::string in; - - out += static_cast(IsoTpUptaneMesType::kGetHwId); - if (!conn.SendRecv(out, &in)) { - return HardwareIdentifier::Unknown(); - } - - if (in[0] != static_cast(IsoTpUptaneMesType::kGetHwIdResp)) { - return HardwareIdentifier::Unknown(); - } - return HardwareIdentifier(in.substr(1)); -} - -PublicKey IsoTpSecondary::getPublicKey() { - std::string out; - std::string in; - - out += static_cast(IsoTpUptaneMesType::kGetPkey); - if (!conn.SendRecv(out, &in)) { - return PublicKey("", KeyType::kUnknown); - } - - if (in[0] != static_cast(IsoTpUptaneMesType::kGetPkeyResp)) { - return PublicKey("", KeyType::kUnknown); - } - return PublicKey(boost::algorithm::hex(in.substr(1)), KeyType::kED25519); -} - -Json::Value IsoTpSecondary::getManifest() { - std::string out; - std::string in; - - out += static_cast(IsoTpUptaneMesType::kGetManifest); - if (!conn.SendRecv(out, &in)) { - return Json::nullValue; - } - - if (in[0] != static_cast(IsoTpUptaneMesType::kGetManifestResp)) { - return Json::nullValue; - } - return Utils::parseJSON(in.substr(1)); -} - -int IsoTpSecondary::getRootVersion(bool director) { - if (!director) { - return 0; - } - - std::string out; - std::string in; - - out += static_cast(IsoTpUptaneMesType::kGetRootVer); - if (!conn.SendRecv(out, &in)) { - return -1; - } - - if (in[0] != static_cast(IsoTpUptaneMesType::kGetRootVerResp)) { - return -1; - } - try { - return boost::lexical_cast(in.substr(1)); - } catch (boost::bad_lexical_cast const&) { - return -1; - } -} - -bool IsoTpSecondary::putRoot(const std::string& root, bool director) { - if (!director) { - return true; - } - std::string out; - out += 
static_cast(IsoTpUptaneMesType::kPutRoot); - out += root; - - return conn.Send(out); -} - -bool IsoTpSecondary::putMetadata(const RawMetaPack& meta_pack) { - std::string out; - out += static_cast(IsoTpUptaneMesType::kPutTargets); - out += meta_pack.director_targets; - - return conn.Send(out); -} - -bool IsoTpSecondary::sendFirmware(const std::shared_ptr& data) { - size_t num_chunks = 1 + (data->length() - 1) / kChunkSize; - - if (num_chunks > 127) { - return false; - } - - for (size_t i = 0; i < num_chunks; ++i) { - std::string out; - std::string in; - out += static_cast(IsoTpUptaneMesType::kPutImageChunk); - out += static_cast(num_chunks); - out += static_cast(i + 1); - if (i == num_chunks - 1) { - out += data->substr(static_cast(i * kChunkSize)); - } else { - out += data->substr(static_cast(i * kChunkSize), static_cast(kChunkSize)); - } - if (!conn.SendRecv(out, &in)) { - return false; - } - if (in[0] != static_cast(IsoTpUptaneMesType::kPutImageChunkAckErr)) { - return false; - } - - if (in[1] != 0x00) { - return false; - } - } - return true; -} -} // namespace Uptane diff --git a/src/libaktualizr/uptane/isotpsecondary.h b/src/libaktualizr/uptane/isotpsecondary.h deleted file mode 100644 index 0bb59121c3..0000000000 --- a/src/libaktualizr/uptane/isotpsecondary.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef UPTANE_ISOTPSECONDARY_H_ -#define UPTANE_ISOTPSECONDARY_H_ - -#include "isotp_conn/isotp_conn.h" -#include "secondaryinterface.h" - -namespace Uptane { - -class IsoTpSecondary : public SecondaryInterface { - public: - explicit IsoTpSecondary(const std::string& can_iface, uint16_t can_id); - - EcuSerial getSerial() override; - HardwareIdentifier getHwId() override; - PublicKey getPublicKey() override; - bool putMetadata(const RawMetaPack& meta_pack) override; - int getRootVersion(bool director) override; - bool putRoot(const std::string& root, bool director) override; - bool sendFirmware(const std::shared_ptr& data) override; - Json::Value getManifest() override; - - 
private: - IsoTpSendRecv conn; -}; -} // namespace Uptane -#endif // UPTANE_ISOTPSECONDARY_H_ diff --git a/src/libaktualizr/uptane/iterator.cc b/src/libaktualizr/uptane/iterator.cc index 29ce80addc..aedf7056d5 100644 --- a/src/libaktualizr/uptane/iterator.cc +++ b/src/libaktualizr/uptane/iterator.cc @@ -1,18 +1,21 @@ #include "iterator.h" +#include "storage/invstorage.h" +#include "uptane/exceptions.h" + namespace Uptane { Targets getTrustedDelegation(const Role &delegate_role, const Targets &parent_targets, - const ImagesRepository &images_repo, INvStorage &storage, Fetcher &fetcher, + const ImageRepository &image_repo, INvStorage &storage, Fetcher &fetcher, const bool offline) { std::string delegation_meta; - auto version_in_snapshot = images_repo.getRoleVersion(delegate_role); + auto version_in_snapshot = image_repo.getRoleVersion(delegate_role); if (storage.loadDelegation(&delegation_meta, delegate_role)) { auto version = extractVersionUntrusted(delegation_meta); if (version > version_in_snapshot) { - throw SecurityException("images", "Rollback attempt on delegated targets"); + throw SecurityException("image", "Rollback attempt on delegated targets"); } else if (version < version_in_snapshot) { delegation_meta.clear(); storage.deleteDelegation(delegate_role); @@ -25,24 +28,29 @@ Targets getTrustedDelegation(const Role &delegate_role, const Targets &parent_ta if (offline) { throw Uptane::DelegationMissing(delegate_role.ToString()); } - if (!fetcher.fetchLatestRole(&delegation_meta, Uptane::kMaxImagesTargetsSize, RepositoryType::Image(), - delegate_role)) { + try { + fetcher.fetchLatestRole(&delegation_meta, Uptane::kMaxImageTargetsSize, RepositoryType::Image(), delegate_role); + } catch (const std::exception &e) { + LOG_ERROR << "Fetch role error: " << e.what(); throw Uptane::DelegationMissing(delegate_role.ToString()); } } - if (!images_repo.verifyRoleHashes(delegation_meta, delegate_role)) { + try { + image_repo.verifyRoleHashes(delegation_meta, delegate_role, 
false); + } catch (const std::exception &e) { + LOG_ERROR << "Role hashes error: " << e.what(); throw Uptane::DelegationHashMismatch(delegate_role.ToString()); } - auto delegation = ImagesRepository::verifyDelegation(delegation_meta, delegate_role, parent_targets); + auto delegation = ImageRepository::verifyDelegation(delegation_meta, delegate_role, parent_targets); if (delegation == nullptr) { - throw SecurityException("images", "Delegation verification failed"); + throw SecurityException("image", "Delegation verification failed"); } if (delegation_remote) { if (delegation->version() != version_in_snapshot) { - throw VersionMismatch("images", delegate_role.ToString()); + throw VersionMismatch("image", delegate_role.ToString()); } storage.storeDelegation(delegation_meta, delegate_role); } @@ -50,7 +58,7 @@ Targets getTrustedDelegation(const Role &delegate_role, const Targets &parent_ta return *delegation; } -LazyTargetsList::DelegationIterator::DelegationIterator(const ImagesRepository &repo, +LazyTargetsList::DelegationIterator::DelegationIterator(const ImageRepository &repo, std::shared_ptr storage, std::shared_ptr fetcher, bool is_end) : repo_{repo}, storage_{std::move(storage)}, fetcher_{std::move(fetcher)}, is_end_{is_end} { @@ -69,7 +77,7 @@ void LazyTargetsList::DelegationIterator::renewTargetsData() { } else { // go to the top of the delegation tree std::stack>::size_type> indices; - auto node = tree_node_->parent; + auto *node = tree_node_->parent; while (node->parent != nullptr) { indices.push(node->parent_idx); node = node->parent; @@ -99,7 +107,7 @@ bool LazyTargetsList::DelegationIterator::operator==(const LazyTargetsList::Dele const Target &LazyTargetsList::DelegationIterator::operator*() { if (is_end_) { - throw std::runtime_error("Inconsistent delegation iterator"); // TODO: UptaneException + throw std::runtime_error("Inconsistent delegation iterator"); } if (!cur_targets_) { @@ -107,12 +115,13 @@ const Target 
&LazyTargetsList::DelegationIterator::operator*() { } if (!cur_targets_ || target_idx_ >= cur_targets_->targets.size()) { - throw std::runtime_error("Inconsistent delegation iterator"); // TODO: UptaneException + throw std::runtime_error("Inconsistent delegation iterator"); } return cur_targets_->targets[target_idx_]; } +// NOLINTNEXTLINE(misc-no-recursion) LazyTargetsList::DelegationIterator LazyTargetsList::DelegationIterator::operator++() { if (is_end_) { return *this; @@ -152,7 +161,7 @@ LazyTargetsList::DelegationIterator LazyTargetsList::DelegationIterator::operato } if (children_idx_ < tree_node_->children.size()) { - auto new_tree_node = tree_node_->children[children_idx_].get(); + auto *new_tree_node = tree_node_->children[children_idx_].get(); target_idx_ = 0; children_idx_ = 0; ++level_; @@ -170,7 +179,7 @@ LazyTargetsList::DelegationIterator LazyTargetsList::DelegationIterator::operato // then go to the parent delegation if (tree_node_->parent != nullptr) { - auto new_tree_node = tree_node_->parent; + auto *new_tree_node = tree_node_->parent; children_idx_ = tree_node_->parent_idx + 1; --level_; terminating_ = false; diff --git a/src/libaktualizr/uptane/iterator.h b/src/libaktualizr/uptane/iterator.h index 005737074e..e976f508e3 100644 --- a/src/libaktualizr/uptane/iterator.h +++ b/src/libaktualizr/uptane/iterator.h @@ -2,12 +2,12 @@ #define AKTUALIZR_UPTANE_ITERATOR_H_ #include "fetcher.h" -#include "imagesrepository.h" +#include "imagerepository.h" namespace Uptane { Targets getTrustedDelegation(const Role &delegate_role, const Targets &parent_targets, - const ImagesRepository &images_repo, INvStorage &storage, Fetcher &fetcher, bool offline); + const ImageRepository &image_repo, INvStorage &storage, Fetcher &fetcher, bool offline); class LazyTargetsList { public: @@ -26,7 +26,7 @@ class LazyTargetsList { using reference = Uptane::Target &; public: - explicit DelegationIterator(const ImagesRepository &repo, std::shared_ptr storage, + explicit 
DelegationIterator(const ImageRepository &repo, std::shared_ptr storage, std::shared_ptr fetcher, bool is_end = false); DelegationIterator operator++(); bool operator==(const DelegationIterator &other) const; @@ -36,7 +36,7 @@ class LazyTargetsList { private: std::shared_ptr tree_; DelegatedTargetTreeNode *tree_node_; - const ImagesRepository &repo_; + const ImageRepository &repo_; std::shared_ptr storage_; std::shared_ptr fetcher_; std::shared_ptr cur_targets_; @@ -49,14 +49,14 @@ class LazyTargetsList { void renewTargetsData(); }; - explicit LazyTargetsList(const ImagesRepository &repo, std::shared_ptr storage, + explicit LazyTargetsList(const ImageRepository &repo, std::shared_ptr storage, std::shared_ptr fetcher) : repo_{repo}, storage_{std::move(storage)}, fetcher_{std::move(fetcher)} {} DelegationIterator begin() { return DelegationIterator(repo_, storage_, fetcher_); } DelegationIterator end() { return DelegationIterator(repo_, storage_, fetcher_, true); } private: - const ImagesRepository &repo_; + const ImageRepository &repo_; std::shared_ptr storage_; std::shared_ptr fetcher_; }; diff --git a/src/libaktualizr/uptane/manifest.cc b/src/libaktualizr/uptane/manifest.cc new file mode 100644 index 0000000000..2041dc026c --- /dev/null +++ b/src/libaktualizr/uptane/manifest.cc @@ -0,0 +1,98 @@ +#include "manifest.h" + +#include + +#include "crypto/keymanager.h" +#include "logging/logging.h" + +namespace Uptane { + +std::string Manifest::filepath() const { + try { + return (*this)["signed"]["installed_image"]["filepath"].asString(); + } catch (const std::exception &ex) { + LOG_ERROR << "Unable to parse manifest: " << ex.what(); + return ""; + } +} + +Hash Manifest::installedImageHash() const { + try { + return Hash(Hash::Type::kSha256, (*this)["signed"]["installed_image"]["fileinfo"]["hashes"]["sha256"].asString()); + } catch (const std::exception &ex) { + LOG_ERROR << "Unable to parse manifest: " << ex.what(); + return Hash(Hash::Type::kUnknownAlgorithm, ""); + } 
+} + +std::string Manifest::signature() const { + try { + return (*this)["signatures"][0]["sig"].asString(); + } catch (const std::exception &ex) { + LOG_ERROR << "Unable to parse manifest: " << ex.what(); + return ""; + } +} + +std::string Manifest::signedBody() const { + try { + return Utils::jsonToCanonicalStr((*this)["signed"]); + } catch (const std::exception &ex) { + LOG_ERROR << "Unable to parse manifest: " << ex.what(); + return ""; + } +} + +bool Manifest::verifySignature(const PublicKey &pub_key) const { + if (!(isMember("signatures") && isMember("signed"))) { + LOG_ERROR << "Missing either signature or the signing body/subject: " << *this; + return false; + } + + return pub_key.VerifySignature(signature(), signedBody()); +} + +Manifest ManifestIssuer::sign(const Manifest &manifest, const std::string &report_counter) const { + Manifest manifest_to_sign = manifest; + if (!report_counter.empty()) { + manifest_to_sign["report_counter"] = report_counter; + } + return key_mngr_->signTuf(manifest_to_sign); +} + +Manifest ManifestIssuer::assembleManifest(const InstalledImageInfo &installed_image_info, + const Uptane::EcuSerial &ecu_serial) { + Json::Value installed_image; + installed_image["filepath"] = installed_image_info.name; + installed_image["fileinfo"]["length"] = Json::UInt64(installed_image_info.len); + installed_image["fileinfo"]["hashes"]["sha256"] = installed_image_info.hash; + + Json::Value unsigned_ecu_version; + unsigned_ecu_version["attacks_detected"] = ""; + unsigned_ecu_version["installed_image"] = installed_image; + unsigned_ecu_version["ecu_serial"] = ecu_serial.ToString(); + unsigned_ecu_version["previous_timeserver_time"] = "1970-01-01T00:00:00Z"; + unsigned_ecu_version["timeserver_time"] = "1970-01-01T00:00:00Z"; + return unsigned_ecu_version; +} + +Hash ManifestIssuer::generateVersionHash(const std::string &data) { return Hash::generate(Hash::Type::kSha256, data); } + +std::string ManifestIssuer::generateVersionHashStr(const std::string 
&data) { + // think of unifying a hash case,we use both lower and upper cases + return boost::algorithm::to_lower_copy(generateVersionHash(data).HashString()); +} + +Manifest ManifestIssuer::assembleManifest(const InstalledImageInfo &installed_image_info) const { + return assembleManifest(installed_image_info, ecu_serial_); +} + +Manifest ManifestIssuer::assembleManifest(const Uptane::Target &target) const { + return assembleManifest(target.getTargetImageInfo()); +} + +Manifest ManifestIssuer::assembleAndSignManifest(const InstalledImageInfo &installed_image_info) const { + return key_mngr_->signTuf(assembleManifest(installed_image_info)); +} + +} // namespace Uptane diff --git a/src/libaktualizr/uptane/manifest.h b/src/libaktualizr/uptane/manifest.h new file mode 100644 index 0000000000..22b507d58d --- /dev/null +++ b/src/libaktualizr/uptane/manifest.h @@ -0,0 +1,38 @@ +#ifndef AKTUALIZR_UPTANE_MANIFEST_H +#define AKTUALIZR_UPTANE_MANIFEST_H + +#include + +#include "json/json.h" +#include "libaktualizr/types.h" + +class KeyManager; + +namespace Uptane { + +class ManifestIssuer { + public: + using Ptr = std::shared_ptr; + + ManifestIssuer(std::shared_ptr &key_mngr, Uptane::EcuSerial ecu_serial) + : ecu_serial_(std::move(ecu_serial)), key_mngr_(key_mngr) {} + + static Manifest assembleManifest(const InstalledImageInfo &installed_image_info, const Uptane::EcuSerial &ecu_serial); + static Hash generateVersionHash(const std::string &data); + static std::string generateVersionHashStr(const std::string &data); + + Manifest sign(const Manifest &manifest, const std::string &report_counter = "") const; + + Manifest assembleManifest(const InstalledImageInfo &installed_image_info) const; + Manifest assembleManifest(const Uptane::Target &target) const; + + Manifest assembleAndSignManifest(const InstalledImageInfo &installed_image_info) const; + + private: + const Uptane::EcuSerial ecu_serial_; + std::shared_ptr key_mngr_; +}; + +} // namespace Uptane + +#endif // 
AKTUALIZR_UPTANE_MANIFEST_H diff --git a/src/libaktualizr/uptane/metawithkeys.cc b/src/libaktualizr/uptane/metawithkeys.cc index f292407d8e..5672ae1050 100644 --- a/src/libaktualizr/uptane/metawithkeys.cc +++ b/src/libaktualizr/uptane/metawithkeys.cc @@ -1,6 +1,10 @@ +#include "uptane/tuf.h" + +#include + #include "logging/logging.h" #include "uptane/exceptions.h" -#include "uptane/tuf.h" +#include "utilities/utils.h" using Uptane::MetaWithKeys; @@ -10,7 +14,7 @@ MetaWithKeys::MetaWithKeys(RepositoryType repo, const Role &role, const Json::Va : BaseMeta(repo, role, json, signer) {} void Uptane::MetaWithKeys::ParseKeys(const RepositoryType repo, const Json::Value &keys) { - for (Json::ValueIterator it = keys.begin(); it != keys.end(); ++it) { + for (auto it = keys.begin(); it != keys.end(); ++it) { const std::string key_type = boost::algorithm::to_lower_copy((*it)["keytype"].asString()); if (key_type != "rsa" && key_type != "ed25519") { throw SecurityException(repo, "Unsupported key type: " + (*it)["keytype"].asString()); @@ -21,11 +25,11 @@ void Uptane::MetaWithKeys::ParseKeys(const RepositoryType repo, const Json::Valu } } -void Uptane::MetaWithKeys::ParseRole(const RepositoryType repo, const Json::ValueIterator &it, const Role &role, +void Uptane::MetaWithKeys::ParseRole(const RepositoryType repo, const Json::ValueConstIterator &it, const Role &role, const std::string &meta_role) { if (role == Role::InvalidRole()) { LOG_WARNING << "Invalid role in " << meta_role << ".json"; - LOG_TRACE << "Role name:" << role.ToString(); + LOG_TRACE << "Role name:" << role; return; } // Threshold @@ -50,7 +54,7 @@ void Uptane::MetaWithKeys::ParseRole(const RepositoryType repo, const Json::Valu // KeyIds const Json::Value keyids = (*it)["keyids"]; - for (Json::ValueIterator itk = keyids.begin(); itk != keyids.end(); ++itk) { + for (auto itk = keyids.begin(); itk != keyids.end(); ++itk) { keys_for_role_.insert(std::make_pair(role, (*itk).asString())); } } @@ -71,12 +75,12 @@ void 
Uptane::MetaWithKeys::UnpackSignedObject(const RepositoryType repo, const R "Metadata type " + type.ToString() + " does not match expected role " + role.ToString()); } - const std::string canonical = Json::FastWriter().write(signed_object["signed"]); + const std::string canonical = Utils::jsonToCanonicalStr(signed_object["signed"]); const Json::Value signatures = signed_object["signatures"]; int valid_signatures = 0; std::set used_keyids; - for (Json::ValueIterator sig = signatures.begin(); sig != signatures.end(); ++sig) { + for (auto sig = signatures.begin(); sig != signatures.end(); ++sig) { const std::string keyid = (*sig)["keyid"].asString(); if (used_keyids.count(keyid) != 0) { throw NonUniqueSignatures(repository, role.ToString()); @@ -90,13 +94,13 @@ void Uptane::MetaWithKeys::UnpackSignedObject(const RepositoryType repo, const R throw SecurityException(repository, std::string("Unsupported sign method: ") + (*sig)["method"].asString()); } - if (keys_.count(keyid) == 0u) { + if (keys_.count(keyid) == 0U) { LOG_DEBUG << "Signed by unknown KeyId: " << keyid << ". 
Skipping."; continue; } - if (keys_for_role_.count(std::make_pair(role, keyid)) == 0u) { - LOG_WARNING << "KeyId " << keyid << " is not valid to sign for this role (" << role.ToString() << ")."; + if (keys_for_role_.count(std::make_pair(role, keyid)) == 0U) { + LOG_WARNING << "KeyId " << keyid << " is not valid to sign for this role (" << role << ")."; continue; } const std::string signature = (*sig)["sig"].asString(); diff --git a/src/libaktualizr/uptane/role.cc b/src/libaktualizr/uptane/role.cc index 928c951e1a..bb2fc02a72 100644 --- a/src/libaktualizr/uptane/role.cc +++ b/src/libaktualizr/uptane/role.cc @@ -2,6 +2,8 @@ #include +#include "uptane/exceptions.h" + using Uptane::Role; using Uptane::Version; @@ -46,6 +48,6 @@ std::string Version::RoleFileName(const Role &role) const { if (version_ != Version::ANY_VERSION) { ss << version_ << "."; } - ss << role.ToString() << ".json"; + ss << role << ".json"; return ss.str(); } diff --git a/src/libaktualizr/uptane/root.cc b/src/libaktualizr/uptane/root.cc index d1ca94b59e..634f579b2f 100644 --- a/src/libaktualizr/uptane/root.cc +++ b/src/libaktualizr/uptane/root.cc @@ -10,7 +10,7 @@ Root::Root(const RepositoryType repo, const Json::Value &json, Root &root) : Roo } Root::Root(const RepositoryType repo, const Json::Value &json) : MetaWithKeys(json), policy_(Policy::kCheck) { - if (!json["signed"].isMember("keys")) { + if (!json["signed"].isMember("keys")) { // NOLINT(bugprone-branch-clone) throw InvalidMetadata(repo, "root", "missing keys field"); } else if (!json["signed"].isMember("roles")) { throw InvalidMetadata(repo, "root", "missing roles field"); @@ -20,7 +20,7 @@ Root::Root(const RepositoryType repo, const Json::Value &json) : MetaWithKeys(js ParseKeys(repo, keys); const Json::Value roles = json["signed"]["roles"]; - for (Json::ValueIterator it = roles.begin(); it != roles.end(); it++) { + for (auto it = roles.begin(); it != roles.end(); it++) { const Role role = Role(it.key().asString()); ParseRole(repo, it, 
role, "root"); } diff --git a/src/libaktualizr/uptane/secondary_metadata.cc b/src/libaktualizr/uptane/secondary_metadata.cc new file mode 100644 index 0000000000..856cdf5d55 --- /dev/null +++ b/src/libaktualizr/uptane/secondary_metadata.cc @@ -0,0 +1,52 @@ +#include "secondary_metadata.h" + +namespace Uptane { + +SecondaryMetadata::SecondaryMetadata(MetaBundle meta_bundle_in) : meta_bundle_(std::move(meta_bundle_in)) { + try { + director_root_version_ = + Version(extractVersionUntrusted(getMetaFromBundle(meta_bundle_, RepositoryType::Director(), Role::Root()))); + } catch (const std::exception& e) { + LOG_DEBUG << "Failed to read Director Root version: " << e.what(); + } + try { + image_root_version_ = + Version(extractVersionUntrusted(getMetaFromBundle(meta_bundle_, RepositoryType::Image(), Role::Root()))); + } catch (const std::exception& e) { + LOG_DEBUG << "Failed to read Image repo Root version: " << e.what(); + } +} + +void SecondaryMetadata::fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Role& role, + Version version) const { + (void)maxsize; + getRoleMetadata(result, repo, role, version); +} + +void SecondaryMetadata::fetchLatestRole(std::string* result, int64_t maxsize, RepositoryType repo, + const Role& role) const { + (void)maxsize; + getRoleMetadata(result, repo, role, Version()); +} + +void SecondaryMetadata::getRoleMetadata(std::string* result, const RepositoryType& repo, const Role& role, + Version version) const { + if (role == Role::Root() && version != Version()) { + // If requesting a Root version beyond what we have available, fail as + // expected. If requesting a version before what is available, just use what + // is available, since root rotation isn't supported here. 
+ if (repo == RepositoryType::Director() && director_root_version_ < version) { + LOG_DEBUG << "Requested Director Root version " << version << " but only version " << director_root_version_ + << " is available."; + throw std::runtime_error("Metadata not found"); + } else if (repo == RepositoryType::Image() && image_root_version_ < version) { + LOG_DEBUG << "Requested Image repo Root version " << version << " but only version " << image_root_version_ + << " is available."; + throw std::runtime_error("Metadata not found"); + } + } + + *result = getMetaFromBundle(meta_bundle_, repo, role); +} + +} // namespace Uptane diff --git a/src/libaktualizr/uptane/secondary_metadata.h b/src/libaktualizr/uptane/secondary_metadata.h new file mode 100644 index 0000000000..52be55ced7 --- /dev/null +++ b/src/libaktualizr/uptane/secondary_metadata.h @@ -0,0 +1,29 @@ +#ifndef AKTUALIZR_SECONDARY_METADATA_H_ +#define AKTUALIZR_SECONDARY_METADATA_H_ + +#include "uptane/fetcher.h" +#include "uptane/tuf.h" + +namespace Uptane { + +class SecondaryMetadata : public IMetadataFetcher { + public: + explicit SecondaryMetadata(MetaBundle meta_bundle_in); + + void fetchRole(std::string* result, int64_t maxsize, RepositoryType repo, const Role& role, + Version version) const override; + void fetchLatestRole(std::string* result, int64_t maxsize, RepositoryType repo, const Role& role) const override; + + protected: + virtual void getRoleMetadata(std::string* result, const RepositoryType& repo, const Role& role, + Version version) const; + + private: + const MetaBundle meta_bundle_; + Version director_root_version_; + Version image_root_version_; +}; + +} // namespace Uptane + +#endif // AKTUALIZR_SECONDARY_METADATA_H_ diff --git a/src/libaktualizr/uptane/secondaryinterface.h b/src/libaktualizr/uptane/secondaryinterface.h deleted file mode 100644 index 296eabf1f8..0000000000 --- a/src/libaktualizr/uptane/secondaryinterface.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef UPTANE_SECONDARYINTERFACE_H -#define 
UPTANE_SECONDARYINTERFACE_H - -#include -#include - -#include "json/json.h" - -#include "uptane/tuf.h" - -/* Json snippet returned by sendMetaXXX(): - * { - * valid = true/false, - * wait_for_target = true/false - * } - */ - -namespace Uptane { - -class SecondaryInterface { - public: - // This ctor should be removed as the secondary configuration SecondaryConfig - // is the secondaries's specific, see SecondaryConfig declaration - // explicit SecondaryInterface(SecondaryConfig sconfig_in) : sconfig(std::move(sconfig_in)) {} - virtual ~SecondaryInterface() = default; - // not clear what this method for, can be removed - // virtual void Initialize(){}; // optional step, called after device registration - // should be pure virtual, since the current implementation reads from the secondaries specific config - // virtual EcuSerial getSerial() { return Uptane::EcuSerial(sconfig.ecu_serial); } - virtual EcuSerial getSerial() = 0; - // should be pure virtual, since the current implementation reads from the secondaries specific config - // virtual Uptane::HardwareIdentifier getHwId() { return Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } - virtual Uptane::HardwareIdentifier getHwId() = 0; - virtual PublicKey getPublicKey() = 0; - - // getSerial(), getHwId() and getPublicKey() can be moved to seperate interface - // since their usage pattern differ from the following methods' one - virtual Json::Value getManifest() = 0; - virtual bool putMetadata(const RawMetaPack& meta_pack) = 0; - virtual int32_t getRootVersion(bool director) = 0; - virtual bool putRoot(const std::string& root, bool director) = 0; - - // FIXME: Instead of std::string we should use StorageTargetRHandle - virtual bool sendFirmware(const std::shared_ptr& data) = 0; - // Should be removes as it's secondary specific - // const SecondaryConfig sconfig; - - // protected: - // SecondaryInterface() : sconfig{} {}; -}; -} // namespace Uptane - -#endif // UPTANE_SECONDARYINTERFACE_H diff --git 
a/src/libaktualizr/uptane/tuf.cc b/src/libaktualizr/uptane/tuf.cc index 1dee591c6f..31931e3c25 100644 --- a/src/libaktualizr/uptane/tuf.cc +++ b/src/libaktualizr/uptane/tuf.cc @@ -4,20 +4,22 @@ #include #include -#include #include #include #include "crypto/crypto.h" +#include "libaktualizr/types.h" #include "logging/logging.h" -#include "utilities/exceptions.h" +#include "uptane/exceptions.h" -using Uptane::Hash; -using Uptane::MetaPack; -using Uptane::Root; using Uptane::Target; using Uptane::Version; +std::ostream &Uptane::operator<<(std::ostream &os, const RepositoryType &repo_type) { + os << repo_type.ToString(); + return os; +} + std::ostream &Uptane::operator<<(std::ostream &os, const Version &v) { if (v.version_ == Version::ANY_VERSION) { os << "vANY"; @@ -27,49 +29,7 @@ std::ostream &Uptane::operator<<(std::ostream &os, const Version &v) { return os; } -std::ostream &Uptane::operator<<(std::ostream &os, const HardwareIdentifier &hwid) { - os << hwid.hwid_; - return os; -} - -std::ostream &Uptane::operator<<(std::ostream &os, const EcuSerial &ecu_serial) { - os << ecu_serial.ecu_serial_; - return os; -} - -Hash::Hash(const std::string &type, const std::string &hash) : hash_(boost::algorithm::to_upper_copy(hash)) { - if (type == "sha512") { - type_ = Hash::Type::kSha512; - } else if (type == "sha256") { - type_ = Hash::Type::kSha256; - } else { - type_ = Hash::Type::kUnknownAlgorithm; - } -} - -Hash::Hash(Type type, const std::string &hash) : type_(type), hash_(boost::algorithm::to_upper_copy(hash)) {} - -bool Hash::operator==(const Hash &other) const { return type_ == other.type_ && hash_ == other.hash_; } - -std::string Hash::TypeString() const { - switch (type_) { - case Type::kSha256: - return "sha256"; - case Type::kSha512: - return "sha512"; - default: - return "unknown"; - } -} - -Hash::Type Hash::type() const { return type_; } - -std::ostream &Uptane::operator<<(std::ostream &os, const Hash &h) { - os << "Hash: " << h.hash_; - return os; -} - 
-std::string Hash::encodeVector(const std::vector &hashes) { +std::string Hash::encodeVector(const std::vector &hashes) { std::stringstream hs; for (auto it = hashes.cbegin(); it != hashes.cend(); it++) { @@ -82,8 +42,8 @@ std::string Hash::encodeVector(const std::vector &hashes) { return hs.str(); } -std::vector Hash::decodeVector(std::string hashes_str) { - std::vector hash_v; +std::vector Hash::decodeVector(std::string hashes_str) { + std::vector hash_v; std::string cs = std::move(hashes_str); while (!cs.empty()) { @@ -106,8 +66,8 @@ std::vector Hash::decodeVector(std::string hashes_str) { std::string hash_value_str = hash_token.substr(cp + 1); if (!hash_value_str.empty()) { - Uptane::Hash h{hash_type_str, hash_value_str}; - if (h.type() != Uptane::Hash::Type::kUnknownAlgorithm) { + Hash h{hash_type_str, hash_value_str}; + if (h.type() != Hash::Type::kUnknownAlgorithm) { hash_v.push_back(std::move(h)); } } @@ -118,39 +78,13 @@ std::vector Hash::decodeVector(std::string hashes_str) { Target::Target(std::string filename, const Json::Value &content) : filename_(std::move(filename)) { if (content.isMember("custom")) { - custom_ = content["custom"]; - - // Images repo provides an array of hardware IDs. - if (custom_.isMember("hardwareIds")) { - Json::Value hwids = custom_["hardwareIds"]; - for (Json::ValueIterator i = hwids.begin(); i != hwids.end(); ++i) { - hwids_.emplace_back(HardwareIdentifier((*i).asString())); - } - } - - // Director provides a map of ECU serials to hardware IDs. 
- Json::Value ecus = custom_["ecuIdentifiers"]; - for (Json::ValueIterator i = ecus.begin(); i != ecus.end(); ++i) { - ecus_.insert({EcuSerial(i.key().asString()), HardwareIdentifier((*i)["hardwareId"].asString())}); - } - - if (custom_.isMember("targetFormat")) { - type_ = custom_["targetFormat"].asString(); - } - - if (custom_.isMember("uri")) { - std::string custom_uri = custom_["uri"].asString(); - // Ignore this exact URL for backwards compatibility with old defaults that inserted it. - if (custom_uri != "https://example.com/") { - uri_ = std::move(custom_uri); - } - } + updateCustom(content["custom"]); } length_ = content["length"].asUInt64(); - Json::Value hashes = content["hashes"]; - for (Json::ValueIterator i = hashes.begin(); i != hashes.end(); ++i) { + const Json::Value hashes = content["hashes"]; + for (auto i = hashes.begin(); i != hashes.end(); ++i) { Hash h(i.key().asString(), (*i).asString()); if (h.HaveAlgorithm()) { hashes_.push_back(h); @@ -160,8 +94,41 @@ Target::Target(std::string filename, const Json::Value &content) : filename_(std std::sort(hashes_.begin(), hashes_.end(), [](const Hash &l, const Hash &r) { return l.type() < r.type(); }); } -Target::Target(std::string filename, EcuMap ecus, std::vector hashes, uint64_t length, std::string correlation_id) +void Target::updateCustom(const Json::Value &custom) { + custom_ = custom; + + // Image repo provides an array of hardware IDs. + if (custom_.isMember("hardwareIds")) { + Json::Value hwids = custom_["hardwareIds"]; + for (auto i = hwids.begin(); i != hwids.end(); ++i) { + hwids_.emplace_back(HardwareIdentifier((*i).asString())); + } + } + + // Director provides a map of ECU serials to hardware IDs. 
+ Json::Value ecus = custom_["ecuIdentifiers"]; + for (auto i = ecus.begin(); i != ecus.end(); ++i) { + ecus_.insert({EcuSerial(i.key().asString()), HardwareIdentifier((*i)["hardwareId"].asString())}); + } + + if (custom_.isMember("targetFormat")) { + type_ = custom_["targetFormat"].asString(); + } + + if (custom_.isMember("uri")) { + std::string custom_uri = custom_["uri"].asString(); + // Ignore this exact URL for backwards compatibility with old defaults that inserted it. + if (custom_uri != "https://example.com/") { + uri_ = std::move(custom_uri); + } + } +} + +// Internal use only. +Target::Target(std::string filename, EcuMap ecus, std::vector hashes, uint64_t length, std::string correlation_id, + std::string type) : filename_(std::move(filename)), + type_(std::move(type)), ecus_(std::move(ecus)), hashes_(std::move(hashes)), length_(length), @@ -172,7 +139,7 @@ Target::Target(std::string filename, EcuMap ecus, std::vector hashes, uint Target Target::Unknown() { Json::Value t_json; - t_json["hashes"]["sha256"] = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(""))); + t_json["hashes"]["sha256"] = Crypto::sha256digestHex(""); t_json["length"] = 0; Uptane::Target target{"unknown", t_json}; @@ -186,7 +153,7 @@ bool Target::MatchHash(const Hash &hash) const { } std::string Target::hashString(Hash::Type type) const { - std::vector::const_iterator it; + std::vector::const_iterator it; for (it = hashes_.begin(); it != hashes_.end(); it++) { if (it->type() == type) { return boost::algorithm::to_lower_copy(it->HashString()); @@ -199,7 +166,17 @@ std::string Target::sha256Hash() const { return hashString(Hash::Type::kSha256); std::string Target::sha512Hash() const { return hashString(Hash::Type::kSha512); } +std::string Target::custom_version() const { + try { + return custom_["version"].asString(); + } catch (const std::exception &ex) { + LOG_ERROR << "Unable to parse custom version: " << ex.what(); + return ""; + } +} + bool 
Target::IsOstree() const { + // NOLINTNEXTLINE(bugprone-branch-clone) if (type_ == "OSTREE") { // Modern servers explicitly specify the type of the target return true; @@ -215,8 +192,8 @@ bool Target::IsOstree() const { } bool Target::MatchTarget(const Target &t2) const { - // type_ (targetFormat) is only provided by the Images repo. - // ecus_ is only provided by the Images repo. + // type_ (targetFormat) is only provided by the Image repo. + // ecus_ is only provided by the Image repo. // correlation_id_ is only provided by the Director. // uri_ is not matched. If the Director provides it, we use that. If not, but // the Image repository does, use that. Otherwise, leave it empty and use the @@ -230,7 +207,7 @@ bool Target::MatchTarget(const Target &t2) const { // If the HWID vector and ECU->HWID map match, we're good. Otherwise, assume // we have a Target from the Director (ECU->HWID map populated, HWID vector - // empty) and a Target from the Images repo (HWID vector populated, + // empty) and a Target from the Image repo (HWID vector populated, // ECU->HWID map empty). Figure out which Target has the map, and then for // every item in the map, make sure it's in the other Target's HWID vector. 
if (hwids_ != t2.hwids_ || ecus_ != t2.ecus_) { @@ -277,7 +254,7 @@ Json::Value Target::toDebugJson() const { } if (!hwids_.empty()) { Json::Value hwids; - for (Json::Value::ArrayIndex i = 0; i < hwids_.size(); ++i) { + for (Json::Value::ArrayIndex i = 0; i < static_cast(hwids_.size()); ++i) { hwids[i] = hwids_[i].ToString(); } res["custom"]["hardwareIds"] = hwids; @@ -323,7 +300,7 @@ void Uptane::BaseMeta::init(const Json::Value &json) { try { expiry_ = TimeStamp(json["signed"]["expires"].asString()); } catch (const TimeStamp::InvalidTimeStamp &exc) { - throw Uptane::InvalidMetadata("", "", "Invalid timestamp"); + throw Uptane::InvalidMetadata("", "", "invalid timestamp"); } original_object_ = json; } @@ -340,13 +317,34 @@ Uptane::BaseMeta::BaseMeta(RepositoryType repo, const Role &role, const Json::Va init(json); } +std::string Uptane::BaseMeta::signature() const { + if (!original_object_.isMember("signatures")) { + throw Uptane::InvalidMetadata("", "", "invalid metadata json, missing signatures"); + } + if (!original_object_["signatures"].isArray()) { + throw Uptane::InvalidMetadata("", "", "invalid metadata json, signatures are not an array"); + } + const auto signs{original_object_["signatures"]}; + if (signs.empty()) { + throw Uptane::InvalidMetadata("", "", "invalid metadata json, no any signatures found"); + } + if (signs.size() > 1) { + LOG_WARNING << "Metadata contains more than one signature\n" << original_object_; + } + if (!signs[0].isMember("sig")) { + throw Uptane::InvalidMetadata("", "", "invalid metadata json, missing signature"); + } + + return signs[0]["sig"].asString(); +} + void Uptane::Targets::init(const Json::Value &json) { if (!json.isObject() || json["signed"]["_type"] != "Targets") { throw Uptane::InvalidMetadata("", "targets", "invalid targets.json"); } const Json::Value target_list = json["signed"]["targets"]; - for (Json::ValueIterator t_it = target_list.begin(); t_it != target_list.end(); t_it++) { + for (auto t_it = 
target_list.begin(); t_it != target_list.end(); t_it++) { Target t(t_it.key().asString(), *t_it); targets.push_back(t); } @@ -356,7 +354,7 @@ void Uptane::Targets::init(const Json::Value &json) { ParseKeys(Uptane::RepositoryType::Image(), key_list); const Json::Value role_list = json["signed"]["delegations"]["roles"]; - for (Json::ValueIterator it = role_list.begin(); it != role_list.end(); it++) { + for (auto it = role_list.begin(); it != role_list.end(); it++) { const std::string role_name = (*it)["name"].asString(); const Role role = Role::Delegation(role_name); delegated_role_names_.push_back(role_name); @@ -364,7 +362,7 @@ void Uptane::Targets::init(const Json::Value &json) { const Json::Value paths_list = (*it)["paths"]; std::vector paths; - for (Json::ValueIterator p_it = paths_list.begin(); p_it != paths_list.end(); p_it++) { + for (auto p_it = paths_list.begin(); p_it != paths_list.end(); p_it++) { paths.emplace_back((*p_it).asString()); } paths_for_role_[role] = paths; @@ -397,7 +395,7 @@ void Uptane::TimestampMeta::init(const Json::Value &json) { throw Uptane::InvalidMetadata("", "timestamp", "invalid timestamp.json"); } - for (Json::ValueIterator it = hashes_list.begin(); it != hashes_list.end(); ++it) { + for (auto it = hashes_list.begin(); it != hashes_list.end(); ++it) { Hash h(it.key().asString(), (*it).asString()); snapshot_hashes_.push_back(h); } @@ -419,7 +417,7 @@ void Uptane::Snapshot::init(const Json::Value &json) { throw Uptane::InvalidMetadata("", "snapshot", "invalid snapshot.json"); } - for (Json::ValueIterator it = meta_list.begin(); it != meta_list.end(); ++it) { + for (auto it = meta_list.begin(); it != meta_list.end(); ++it) { Json::Value hashes_list = (*it)["hashes"]; Json::Value meta_size = (*it)["length"]; Json::Value meta_version = (*it)["version"]; @@ -446,7 +444,7 @@ void Uptane::Snapshot::init(const Json::Value &json) { role_size_[role_object] = -1; } if (hashes_list.isObject()) { - for (Json::ValueIterator h_it = 
hashes_list.begin(); h_it != hashes_list.end(); ++h_it) { + for (auto h_it = hashes_list.begin(); h_it != hashes_list.end(); ++h_it) { Hash h(h_it.key().asString(), (*h_it).asString()); role_hashes_[role_object].push_back(h); } @@ -488,24 +486,6 @@ int Uptane::Snapshot::role_version(const Uptane::Role &role) const { } }; -bool MetaPack::isConsistent() const { - TimeStamp now(TimeStamp::Now()); - try { - if (director_root.original() != Json::nullValue) { - Uptane::Root original_root(director_root); - Uptane::Root new_root(RepositoryType::Director(), director_root.original(), new_root); - if (director_targets.original() != Json::nullValue) { - Uptane::Targets(RepositoryType::Director(), Role::Targets(), director_targets.original(), - std::make_shared(original_root)); - } - } - } catch (const std::logic_error &exc) { - LOG_WARNING << "Inconsistent metadata: " << exc.what(); - return false; - } - return true; -} - int Uptane::extractVersionUntrusted(const std::string &meta) { auto version_json = Utils::parseJSON(meta)["signed"]["version"]; if (!version_json.isIntegral()) { @@ -514,3 +494,12 @@ int Uptane::extractVersionUntrusted(const std::string &meta) { return version_json.asInt(); } } + +std::string Uptane::getMetaFromBundle(const MetaBundle &bundle, const RepositoryType repo, const Role &role) { + auto it = bundle.find(std::make_pair(repo, role)); + if (it == bundle.end()) { + throw std::runtime_error("Metadata not found for " + role.ToString() + " role from the " + repo.ToString() + + " repository."); + } + return it->second; +} diff --git a/src/libaktualizr/uptane/tuf.h b/src/libaktualizr/uptane/tuf.h index 38e7cf745d..2bea7b6459 100644 --- a/src/libaktualizr/uptane/tuf.h +++ b/src/libaktualizr/uptane/tuf.h @@ -2,7 +2,7 @@ #define AKTUALIZR_UPTANE_TUF_H_ /** - * Base data types that are used in The Update Framework (TUF), part of UPTANE. + * Base data types that are used in The Update Framework (TUF), part of Uptane. 
*/ #include @@ -10,10 +10,8 @@ #include #include #include -#include "uptane/exceptions.h" -#include "crypto/crypto.h" -#include "utilities/types.h" +#include "libaktualizr/types.h" namespace Uptane { @@ -23,33 +21,43 @@ class RepositoryType { enum class Type { kUnknown = -1, kImage = 0, kDirector = 1 }; public: + static const std::string IMAGE; + static const std::string DIRECTOR; + RepositoryType() = default; static constexpr int Director() { return static_cast(Type::kDirector); } static constexpr int Image() { return static_cast(Type::kImage); } + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) RepositoryType(int type) { type_ = static_cast(type); } - RepositoryType(const std::string &repo_type) { - if (repo_type == "director") { + explicit RepositoryType(const std::string &repo_type) { + if (repo_type == DIRECTOR) { type_ = RepositoryType::Type::kDirector; - } else if (repo_type == "image") { + } else if (repo_type == IMAGE) { type_ = RepositoryType::Type::kImage; } else { throw std::runtime_error(std::string("Incorrect repo type: ") + repo_type); } } + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) operator int() const { return static_cast(type_); } - operator const std::string() const { return toString(); } + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) + operator std::string() const { return ToString(); } Type type_; - std::string toString() const { + std::string ToString() const { if (type_ == RepositoryType::Type::kDirector) { - return "director"; + return DIRECTOR; } else if (type_ == RepositoryType::Type::kImage) { - return "image"; + return IMAGE; } else { return ""; } } + + friend std::ostream &operator<<(std::ostream &os, const RepositoryType &repo_type); }; +std::ostream &operator<<(std::ostream &os, const RepositoryType &repo_type); + using KeyId = std::string; /** * TUF Roles @@ -119,6 +127,9 @@ class Version { explicit Version(int v) : version_(v) {} std::string 
RoleFileName(const Role &role) const; int version() const { return version_; } + bool operator==(const Version &rhs) const { return version_ == rhs.version_; } + bool operator!=(const Version &rhs) const { return version_ != rhs.version_; } + bool operator<(const Version &rhs) const { return version_ < rhs.version_; } private: static const int ANY_VERSION = -1; @@ -128,164 +139,6 @@ class Version { std::ostream &operator<<(std::ostream &os, const Version &v); -class HardwareIdentifier { - public: - // https://github.com/advancedtelematic/ota-tuf/blob/master/libtuf/src/main/scala/com/advancedtelematic/libtuf/data/TufDataType.scala - static const int kMinLength = 0; - static const int kMaxLength = 200; - - static HardwareIdentifier Unknown() { return HardwareIdentifier("Unknown"); } - explicit HardwareIdentifier(const std::string &hwid) : hwid_(hwid) { - /* if (hwid.length() < kMinLength) { - throw std::out_of_range("Hardware Identifier too short"); - } */ - if (kMaxLength < hwid.length()) { - throw std::out_of_range("Hardware Identifier too long"); - } - } - - std::string ToString() const { return hwid_; } - - bool operator==(const HardwareIdentifier &rhs) const { return hwid_ == rhs.hwid_; } - bool operator!=(const HardwareIdentifier &rhs) const { return !(*this == rhs); } - - bool operator<(const HardwareIdentifier &rhs) const { return hwid_ < rhs.hwid_; } - friend std::ostream &operator<<(std::ostream &os, const HardwareIdentifier &hwid); - friend struct std::hash; - - private: - std::string hwid_; -}; - -std::ostream &operator<<(std::ostream &os, const HardwareIdentifier &hwid); - -class EcuSerial { - public: - // https://github.com/advancedtelematic/ota-tuf/blob/master/libtuf/src/main/scala/com/advancedtelematic/libtuf/data/TufDataType.scala - static const int kMinLength = 1; - static const int kMaxLength = 64; - - static EcuSerial Unknown() { return EcuSerial("Unknown"); } - explicit EcuSerial(const std::string &ecu_serial) : ecu_serial_(ecu_serial) { - if 
(ecu_serial.length() < kMinLength) { - throw std::out_of_range("Ecu serial identifier is too short"); - } - if (kMaxLength < ecu_serial.length()) { - throw std::out_of_range("Ecu serial identifier is too long"); - } - } - - std::string ToString() const { return ecu_serial_; } - - bool operator==(const EcuSerial &rhs) const { return ecu_serial_ == rhs.ecu_serial_; } - bool operator!=(const EcuSerial &rhs) const { return !(*this == rhs); } - - bool operator<(const EcuSerial &rhs) const { return ecu_serial_ < rhs.ecu_serial_; } - friend std::ostream &operator<<(std::ostream &os, const EcuSerial &ecu_serial); - friend struct std::hash; - - private: - std::string ecu_serial_; -}; - -std::ostream &operator<<(std::ostream &os, const EcuSerial &ecu_serial); - -/** - * The hash of a file or TUF metadata. File hashes/checksums in TUF include the length of the object, in order to - * defeat infinite download attacks. - */ -class Hash { - public: - // order corresponds algorithm priority - enum class Type { kSha256, kSha512, kUnknownAlgorithm }; - - Hash(const std::string &type, const std::string &hash); - Hash(Type type, const std::string &hash); - - bool HaveAlgorithm() const { return type_ != Type::kUnknownAlgorithm; } - bool operator==(const Hash &other) const; - bool operator!=(const Hash &other) const { return !operator==(other); } - std::string TypeString() const; - Type type() const; - std::string HashString() const { return hash_; } - friend std::ostream &operator<<(std::ostream &os, const Hash &h); - - static std::string encodeVector(const std::vector &hashes); - static std::vector decodeVector(std::string hashes_str); - - private: - Type type_; - std::string hash_; -}; - -std::ostream &operator<<(std::ostream &os, const Hash &h); - -using EcuMap = std::map; - -class Target { - public: - // From Uptane metadata - Target(std::string filename, const Json::Value &content); - // Internal, does not have type. 
Only used for reading installation_versions - // list and by various tests. - Target(std::string filename, EcuMap ecus, std::vector hashes, uint64_t length, std::string correlation_id = ""); - - static Target Unknown(); - - const EcuMap &ecus() const { return ecus_; } - std::string filename() const { return filename_; } - std::string sha256Hash() const; - std::string sha512Hash() const; - std::vector hashes() const { return hashes_; }; - std::vector hardwareIds() const { return hwids_; }; - std::string custom_version() const { return custom_["version"].asString(); } - Json::Value custom_data() const { return custom_; } - void updateCustom(Json::Value &custom) { custom_ = custom; }; - std::string correlation_id() const { return correlation_id_; }; - void setCorrelationId(std::string correlation_id) { correlation_id_ = std::move(correlation_id); }; - uint64_t length() const { return length_; } - bool IsValid() const { return valid; } - std::string uri() const { return uri_; }; - void setUri(std::string uri) { uri_ = std::move(uri); }; - bool MatchHash(const Hash &hash) const; - - bool IsForSecondary(const EcuSerial &ecuIdentifier) const { - return (std::find_if(ecus_.cbegin(), ecus_.cend(), [&ecuIdentifier](std::pair pair) { - return pair.first == ecuIdentifier; - }) != ecus_.cend()); - }; - - /** - * Is this an OSTree target? - * OSTree targets need special treatment because the hash doesn't represent - * the contents of the update itself, instead it is the hash (name) of the - * root commit object. - */ - bool IsOstree() const; - - // Comparison is usually not meaningful. Use MatchTarget instead. 
- bool operator==(const Target &t2) = delete; - bool MatchTarget(const Target &t2) const; - Json::Value toDebugJson() const; - friend std::ostream &operator<<(std::ostream &os, const Target &t); - - private: - bool valid{true}; - std::string filename_; - std::string type_; - EcuMap ecus_; // Director only - std::vector hashes_; - std::vector hwids_; // Images repo only - Json::Value custom_; - uint64_t length_{0}; - std::string correlation_id_; - std::string uri_; - - std::string hashString(Hash::Type type) const; -}; - -std::ostream &operator<<(std::ostream &os, const Target &t); - /* Metadata objects */ class MetaWithKeys; class BaseMeta { @@ -297,7 +150,14 @@ class BaseMeta { TimeStamp expiry() const { return expiry_; } bool isExpired(const TimeStamp &now) const { return expiry_.IsExpiredAt(now); } Json::Value original() const { return original_object_; } - + /** + * Get the first signature of a given meta. + * + * Assumption is that a given metadata includes "signatures" attribute that contains at least one signature. + * @return the first found signature or exception is thrown if not found. + */ + std::string signature() const; + bool isInitialized() const { return !original_object_.isNull(); } bool operator==(const BaseMeta &rhs) const { return version_ == rhs.version() && expiry_ == rhs.expiry(); } protected: @@ -317,20 +177,22 @@ class MetaWithKeys : public BaseMeta { */ MetaWithKeys() { version_ = 0; } /** - * A 'real' metadata object that can contain keys (root or targets with + * A 'real' metadata object that can contain keys (Root or Targets with * delegations) and that implements TUF signature validation. 
* @param json - The contents of the 'signed' portion */ - MetaWithKeys(const Json::Value &json); + explicit MetaWithKeys(const Json::Value &json); MetaWithKeys(RepositoryType repo, const Role &role, const Json::Value &json, const std::shared_ptr &signer); virtual ~MetaWithKeys() = default; + MetaWithKeys(const MetaWithKeys &guard) = default; void ParseKeys(RepositoryType repo, const Json::Value &keys); // role is the name of a role described in this object's metadata. // meta_role is the name of this object's role. - void ParseRole(RepositoryType repo, const Json::ValueIterator &it, const Role &role, const std::string &meta_role); + void ParseRole(RepositoryType repo, const Json::ValueConstIterator &it, const Role &role, + const std::string &meta_role); /** * Take a JSON blob that contains a signatures/signed component that is supposedly for a given role, and check that is @@ -354,6 +216,10 @@ class MetaWithKeys : public BaseMeta { } protected: + MetaWithKeys(MetaWithKeys &&) = default; + MetaWithKeys &operator=(const MetaWithKeys &guard) = default; + MetaWithKeys &operator=(MetaWithKeys &&) = default; + static const int64_t kMinSignatures = 1; static const int64_t kMaxSignatures = 1000; @@ -370,15 +236,13 @@ class Root : public MetaWithKeys { */ explicit Root(Policy policy = Policy::kRejectAll) : policy_(policy) { version_ = 0; } /** - * A 'real' root that implements TUF signature validation + * A 'real' Root that implements TUF signature validation * @param repo - Repository type (only used to improve the error messages) * @param json - The contents of the 'signed' portion */ Root(RepositoryType repo, const Json::Value &json); Root(RepositoryType repo, const Json::Value &json, Root &root); - ~Root() override = default; - /** * Take a JSON blob that contains a signatures/signed component that is supposedly for a given role, and check that is * suitably signed. 
@@ -423,12 +287,13 @@ class Targets : public MetaWithKeys { explicit Targets(const Json::Value &json); Targets(RepositoryType repo, const Role &role, const Json::Value &json, const std::shared_ptr &signer); Targets() = default; - ~Targets() override = default; bool operator==(const Targets &rhs) const { return version_ == rhs.version() && expiry_ == rhs.expiry() && MatchTargetVector(targets, rhs.targets); } + const std::string &correlation_id() const { return correlation_id_; } + void clear() { targets.clear(); delegated_role_names_.clear(); @@ -436,6 +301,24 @@ class Targets : public MetaWithKeys { terminating_role_.clear(); } + // Only makes sense for Targets from the Director repo; the Image repo doesn't + // specify ECU serials. + std::vector getTargets(const Uptane::EcuSerial &ecu_id, + const Uptane::HardwareIdentifier &hw_id) const { + std::vector result; + for (auto it = targets.begin(); it != targets.end(); ++it) { + auto found_loc = std::find_if(it->ecus().begin(), it->ecus().end(), + [ecu_id, hw_id](const std::pair &val) { + return ((ecu_id == val.first) && (hw_id == val.second)); + }); + + if (found_loc != it->ecus().end()) { + result.push_back(*it); + } + } + return result; + } + std::vector targets; std::vector delegated_role_names_; std::map> paths_for_role_; @@ -485,24 +368,13 @@ class Snapshot : public BaseMeta { std::map> role_hashes_; }; -struct MetaPack { - Root director_root; - Targets director_targets; - Root image_root; - Targets image_targets; - TimestampMeta image_timestamp; - Snapshot image_snapshot; - bool isConsistent() const; +struct MetaPairHash { + std::size_t operator()(const std::pair &pair) const { + return std::hash()(pair.first.ToString()) ^ std::hash()(pair.second.ToString()); + } }; -struct RawMetaPack { - std::string director_root; - std::string director_targets; - std::string image_root; - std::string image_targets; - std::string image_timestamp; - std::string image_snapshot; -}; +std::string getMetaFromBundle(const MetaBundle 
&bundle, RepositoryType repo, const Role &role); int extractVersionUntrusted(const std::string &meta); // returns negative number if parsing fails diff --git a/src/libaktualizr/uptane/tuf_hash_test.cc b/src/libaktualizr/uptane/tuf_hash_test.cc deleted file mode 100644 index 6ba74011ee..0000000000 --- a/src/libaktualizr/uptane/tuf_hash_test.cc +++ /dev/null @@ -1,38 +0,0 @@ -#include - -#include "logging/logging.h" -#include "uptane/exceptions.h" -#include "uptane/tuf.h" -#include "utilities/utils.h" - -TEST(TufHash, EncodeDecode) { - std::vector hashes = {{Uptane::Hash::Type::kSha256, "abcd"}, {Uptane::Hash::Type::kSha512, "defg"}}; - - std::string encoded = Uptane::Hash::encodeVector(hashes); - std::vector decoded = Uptane::Hash::decodeVector(encoded); - - EXPECT_EQ(hashes, decoded); -} - -TEST(TufHash, DecodeBad) { - std::string bad1 = ":"; - EXPECT_EQ(Uptane::Hash::decodeVector(bad1), std::vector{}); - - std::string bad2 = ":abcd;sha256:12"; - EXPECT_EQ(Uptane::Hash::decodeVector(bad2), - std::vector{Uptane::Hash(Uptane::Hash::Type::kSha256, "12")}); - - std::string bad3 = "sha256;"; - EXPECT_EQ(Uptane::Hash::decodeVector(bad3), std::vector{}); - - std::string bad4 = "sha256:;"; - EXPECT_EQ(Uptane::Hash::decodeVector(bad4), std::vector{}); -} - -#ifndef __NO_MAIN__ -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - logger_set_threshold(boost::log::trivial::trace); - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/libaktualizr/uptane/tuf_test.cc b/src/libaktualizr/uptane/tuf_test.cc index 9f12bbd2ed..cef10c869c 100644 --- a/src/libaktualizr/uptane/tuf_test.cc +++ b/src/libaktualizr/uptane/tuf_test.cc @@ -10,7 +10,7 @@ #include "uptane/tuf.h" #include "utilities/utils.h" -/* Validate a TUF root. */ +/* Validate Root metadata. 
*/ TEST(Root, RootValidates) { Json::Value initial_root = Utils::parseJSONFile("tests/tuf/sample1/root.json"); LOG_INFO << "Root is:" << initial_root; @@ -21,7 +21,7 @@ TEST(Root, RootValidates) { EXPECT_NO_THROW(Uptane::Root(Uptane::RepositoryType::Director(), initial_root, root)); } -/* Throw an exception if a TUF root is unsigned. */ +/* Throw an exception if Root metadata is unsigned. */ TEST(Root, RootJsonNoKeys) { Uptane::Root root1(Uptane::Root::Policy::kAcceptAll); Json::Value initial_root = Utils::parseJSONFile("tests/tuf/sample1/root.json"); @@ -29,7 +29,18 @@ TEST(Root, RootJsonNoKeys) { EXPECT_THROW(Uptane::Root(Uptane::RepositoryType::Director(), initial_root, root1), Uptane::InvalidMetadata); } -/* Throw an exception if a TUF root has no roles. */ +/** + * For offline updates we want to include more keys in root.json. Check that it + * is OK for root.json to contain keys types we don't know about. + * */ +TEST(Root, ExtraKeysOk) { + Uptane::Root root1(Uptane::Root::Policy::kAcceptAll); + Json::Value initial_root = Utils::parseJSONFile("tests/tuf/root-with-extra-keys.json"); + Uptane::Root root(Uptane::RepositoryType::Director(), initial_root, root1); + EXPECT_NO_THROW(Uptane::Root(Uptane::RepositoryType::Director(), initial_root, root)); +} + +/* Throw an exception if Root metadata has no roles. 
*/ TEST(Root, RootJsonNoRoles) { Uptane::Root root1(Uptane::Root::Policy::kAcceptAll); Json::Value initial_root = Utils::parseJSONFile("tests/tuf/sample1/root.json"); @@ -39,7 +50,6 @@ TEST(Root, RootJsonNoRoles) { /** * Check that a root.json that uses "method": "rsassa-pss-sha256" validates correctly - * See PRO-2999 */ TEST(Root, RootJsonRsassaPssSha256) { Uptane::Root root1(Uptane::Root::Policy::kAcceptAll); @@ -111,8 +121,8 @@ Json::Value generateDirectorTarget(const std::string& hash, const int length, co return target; } -Json::Value generateImagesTarget(const std::string& hash, const int length, - const std::vector& hardwareIds) { +Json::Value generateImageTarget(const std::string& hash, const int length, + const std::vector& hardwareIds) { Json::Value target = generateTarget(hash, length); Json::Value custom; Json::Value hwids; @@ -132,7 +142,7 @@ TEST(Target, Match) { hardwareIds.emplace_back(hwid); ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_TRUE(target1.MatchTarget(target2)); EXPECT_TRUE(target2.MatchTarget(target1)); } @@ -150,20 +160,20 @@ TEST(Target, MatchDirector) { EXPECT_TRUE(target2.MatchTarget(target1)); } -/* Two Target objects created by the Images repo should match. */ +/* Two Target objects created by the Image repo should match. 
*/ TEST(Target, MatchImages) { Uptane::HardwareIdentifier hwid("first-test"); Uptane::HardwareIdentifier hwid2("second-test"); std::vector hardwareIds; hardwareIds.emplace_back(hwid); hardwareIds.emplace_back(hwid2); - Uptane::Target target1("abc", generateImagesTarget("hash_good", 739, hardwareIds)); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target1("abc", generateImageTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_TRUE(target1.MatchTarget(target2)); EXPECT_TRUE(target2.MatchTarget(target1)); } -/* Extra hardware IDs in the Images Target metadata should still match. */ +/* Extra hardware IDs in the Image Target metadata should still match. */ TEST(Target, MatchExtraHwId) { Uptane::HardwareIdentifier hwid("fake-test"); std::vector hardwareIds; @@ -172,7 +182,7 @@ TEST(Target, MatchExtraHwId) { ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); hardwareIds.emplace_back(Uptane::HardwareIdentifier("extra")); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_TRUE(target1.MatchTarget(target2)); EXPECT_TRUE(target2.MatchTarget(target1)); } @@ -188,7 +198,7 @@ TEST(Target, MatchTwo) { ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); ecu_map.insert({Uptane::EcuSerial("serial2"), hwid2}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_TRUE(target1.MatchTarget(target2)); EXPECT_TRUE(target2.MatchTarget(target1)); } @@ -199,9 +209,9 @@ TEST(Target, MultipleHwIdMismatch) { std::vector hardwareIds; 
hardwareIds.emplace_back(hwid); hardwareIds.emplace_back(Uptane::HardwareIdentifier("extra")); - Uptane::Target target1("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target1("abc", generateImageTarget("hash_good", 739, hardwareIds)); hardwareIds.emplace_back(Uptane::HardwareIdentifier("extra2")); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); EXPECT_FALSE(target2.MatchTarget(target1)); } @@ -215,7 +225,7 @@ TEST(Target, MissingHwId) { ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); hardwareIds.clear(); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); EXPECT_FALSE(target2.MatchTarget(target1)); } @@ -228,7 +238,7 @@ TEST(Target, FilenameMismatch) { hardwareIds.emplace_back(hwid); ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); - Uptane::Target target2("xyz", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("xyz", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); EXPECT_FALSE(target2.MatchTarget(target1)); } @@ -241,7 +251,7 @@ TEST(Target, LengthMismatch) { hardwareIds.emplace_back(hwid); ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 1, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 1, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); 
EXPECT_FALSE(target2.MatchTarget(target1)); } @@ -255,7 +265,7 @@ TEST(Target, HardwareIdMismatch) { ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); hardwareIds[0] = Uptane::HardwareIdentifier("alt-test"); - Uptane::Target target2("abc", generateImagesTarget("hash_good", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_good", 739, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); EXPECT_FALSE(target2.MatchTarget(target1)); } @@ -268,7 +278,7 @@ TEST(Target, HashMismatch) { hardwareIds.emplace_back(hwid); ecu_map.insert({Uptane::EcuSerial("serial"), hwid}); Uptane::Target target1("abc", generateDirectorTarget("hash_good", 739, ecu_map)); - Uptane::Target target2("abc", generateImagesTarget("hash_bad", 739, hardwareIds)); + Uptane::Target target2("abc", generateImageTarget("hash_bad", 739, hardwareIds)); EXPECT_FALSE(target1.MatchTarget(target2)); EXPECT_FALSE(target2.MatchTarget(target1)); } diff --git a/src/libaktualizr/uptane/uptane_ci_test.cc b/src/libaktualizr/uptane/uptane_ci_test.cc index 3ecd9cb98a..aebba60f4c 100644 --- a/src/libaktualizr/uptane/uptane_ci_test.cc +++ b/src/libaktualizr/uptane/uptane_ci_test.cc @@ -7,11 +7,12 @@ #include #include +#include "libaktualizr/packagemanagerfactory.h" +#include "libaktualizr/packagemanagerinterface.h" + #include "http/httpclient.h" #include "logging/logging.h" #include "package_manager/ostreemanager.h" -#include "package_manager/packagemanagerfactory.h" -#include "package_manager/packagemanagerinterface.h" #include "primary/reportqueue.h" #include "primary/sotauptaneclient.h" #include "storage/invstorage.h" @@ -28,16 +29,13 @@ TEST(UptaneCI, ProvisionAndPutManifest) { TemporaryDirectory temp_dir; Config config("tests/config/minimal.toml"); config.provision.provision_path = credentials; - config.provision.mode = ProvisionMode::kSharedCred; + config.provision.mode = 
ProvisionMode::kSharedCredReuse; config.storage.path = temp_dir.Path(); - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; config.postUpdateValues(); // re-run copy of urls auto storage = INvStorage::newStorage(config.storage); - auto http = std::make_shared(); - Uptane::Manifest uptane_manifest{config, storage}; - - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage); EXPECT_NO_THROW(sota_client->initialize()); EXPECT_TRUE(sota_client->putManifestSimple()); } @@ -46,18 +44,17 @@ TEST(UptaneCI, CheckKeys) { TemporaryDirectory temp_dir; Config config("tests/config/minimal.toml"); config.provision.provision_path = credentials; - config.provision.mode = ProvisionMode::kSharedCred; + config.provision.mode = ProvisionMode::kSharedCredReuse; config.storage.path = temp_dir.Path(); - config.pacman.type = PackageManager::kOstree; + config.pacman.type = PACKAGE_MANAGER_OSTREE; config.pacman.sysroot = sysroot; config.postUpdateValues(); // re-run copy of urls boost::filesystem::remove_all(config.storage.path); auto storage = INvStorage::newStorage(config.storage); - auto http = std::make_shared(); UptaneTestCommon::addDefaultSecondary(config, temp_dir, "", "secondary_hardware"); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage); EXPECT_NO_THROW(sota_client->initialize()); std::string ca; @@ -74,7 +71,7 @@ TEST(UptaneCI, CheckKeys) { EXPECT_TRUE(primary_public.size() > 0); EXPECT_TRUE(primary_private.size() > 0); - std::map >::iterator it; + std::map >::iterator it; for (it = sota_client->secondaries.begin(); it != sota_client->secondaries.end(); it++) { std::shared_ptr managed_secondary = std::dynamic_pointer_cast(it->second); diff --git a/src/libaktualizr/uptane/uptane_delegation_test.cc b/src/libaktualizr/uptane/uptane_delegation_test.cc index 53d938e367..0256a6cedc 100644 
--- a/src/libaktualizr/uptane/uptane_delegation_test.cc +++ b/src/libaktualizr/uptane/uptane_delegation_test.cc @@ -5,10 +5,11 @@ #include #include "json/json.h" -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/config.h" +#include "libaktualizr/events.h" + #include "httpfake.h" -#include "primary/aktualizr.h" -#include "primary/events.h" #include "uptane_test_common.h" boost::filesystem::path uptane_generator_path; @@ -54,7 +55,7 @@ class HttpFakeDelegation : public HttpFake { /* Validate first-order target delegations. * Search first-order delegations. - * Correlation ID is empty if none was provided in targets metadata. */ + * Correlation ID is empty if none was provided in Targets metadata. */ TEST(Delegation, Basic) { for (auto generate_fun : {delegation_basic, delegation_nested}) { TemporaryDirectory temp_dir; @@ -78,37 +79,42 @@ TEST(Delegation, Basic) { EXPECT_EQ(download_result.status, result::DownloadStatus::kSuccess); result::Install install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); for (const auto& r : install_result.ecu_reports) { EXPECT_EQ(r.install_res.result_code.num_code, data::ResultCode::Numeric::kOk); } } - EXPECT_EQ(http->events_seen, 8); + // TODO: implement delegation support in ManagedSecondary and restore the + // Secondary target in the metadata generation scripts; then there should + // be 8 events. 
+ EXPECT_EQ(http->events_seen, 4); } } TEST(Delegation, RevokeAfterCheckUpdates) { for (auto generate_fun : {delegation_basic, delegation_nested}) { TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + auto delegation_path = temp_dir.Path() / "delegation_test"; - generate_fun(delegation_path, false); { - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); + generate_fun(delegation_path, false); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); - EXPECT_EQ(update_result.updates.size(), 2); + // TODO: implement delegation support in ManagedSecondary and restore the + // Secondary target in the metadata generation scripts; then there should + // be 2 updates. + EXPECT_EQ(update_result.updates.size(), 1); } // Revoke delegation after CheckUpdates() and test if we can properly handle it. 
{ generate_fun(delegation_path, true); - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); @@ -120,6 +126,7 @@ TEST(Delegation, RevokeAfterCheckUpdates) { EXPECT_EQ(download_result.status, result::DownloadStatus::kSuccess); result::Install install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); for (const auto& r : install_result.ecu_reports) { EXPECT_EQ(r.install_res.result_code.num_code, data::ResultCode::Numeric::kOk); } @@ -130,12 +137,13 @@ TEST(Delegation, RevokeAfterCheckUpdates) { TEST(Delegation, RevokeAfterDownload) { for (auto generate_fun : {delegation_basic, delegation_nested}) { TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + auto delegation_path = temp_dir.Path() / "delegation_test"; - generate_fun(delegation_path, false); { - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); + generate_fun(delegation_path, false); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); @@ -148,10 +156,6 @@ TEST(Delegation, RevokeAfterDownload) { // Revoke delegation after Download() and test if we can properly handle it { generate_fun(delegation_path, true); - - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); @@ -173,12 +177,13 @@ TEST(Delegation, 
RevokeAfterDownload) { TEST(Delegation, RevokeAfterInstall) { for (auto generate_fun : {delegation_basic, delegation_nested}) { TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + auto storage = INvStorage::newStorage(conf.storage); + auto delegation_path = temp_dir.Path() / "delegation_test"; - generate_fun(delegation_path, false); { - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); + generate_fun(delegation_path, false); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); @@ -196,10 +201,6 @@ TEST(Delegation, RevokeAfterInstall) { // Revoke delegation after Install() and test if can properly CheckUpdates again { generate_fun(delegation_path, true); - - auto http = std::make_shared(temp_dir.Path()); - Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); - auto storage = INvStorage::newStorage(conf.storage); UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); aktualizr.Initialize(); diff --git a/src/libaktualizr/uptane/uptane_init_test.cc b/src/libaktualizr/uptane/uptane_init_test.cc deleted file mode 100644 index 0519e0fb6f..0000000000 --- a/src/libaktualizr/uptane/uptane_init_test.cc +++ /dev/null @@ -1,311 +0,0 @@ -#include - -#include - -#include - -#include "httpfake.h" -#include "primary/initializer.h" -#include "primary/sotauptaneclient.h" -#include "storage/invstorage.h" -#include "utilities/utils.h" - -/* - * Check that aktualizr creates provisioning data if they don't exist already. 
- */ -TEST(Uptane, Initialize) { - RecordProperty("zephyr_key", "OTA-983,TST-153"); - TemporaryDirectory temp_dir; - auto http = std::make_shared(temp_dir.Path()); - Config conf("tests/config/basic.toml"); - conf.uptane.director_server = http->tls_server + "/director"; - conf.uptane.repo_server = http->tls_server + "/repo"; - conf.tls.server = http->tls_server; - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = "testecuserial"; - - // First make sure nothing is already there. - auto storage = INvStorage::newStorage(conf.storage); - std::string pkey; - std::string cert; - std::string ca; - EXPECT_FALSE(storage->loadTlsCreds(&ca, &cert, &pkey)); - std::string public_key; - std::string private_key; - EXPECT_FALSE(storage->loadPrimaryKeys(&public_key, &private_key)); - - // Initialize. - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - // Then verify that the storage contains what we expect. - EXPECT_TRUE(storage->loadTlsCreds(&ca, &cert, &pkey)); - EXPECT_NE(ca, ""); - EXPECT_NE(cert, ""); - EXPECT_NE(pkey, ""); - EXPECT_TRUE(storage->loadPrimaryKeys(&public_key, &private_key)); - EXPECT_NE(public_key, ""); - EXPECT_NE(private_key, ""); - - const Json::Value ecu_data = Utils::parseJSONFile(temp_dir.Path() / "post.json"); - EXPECT_EQ(ecu_data["ecus"].size(), 1); - EXPECT_EQ(ecu_data["ecus"][0]["clientKey"]["keyval"]["public"].asString(), public_key); - EXPECT_EQ(ecu_data["ecus"][0]["ecu_serial"].asString(), conf.provision.primary_ecu_serial); - EXPECT_NE(ecu_data["ecus"][0]["hardware_identifier"].asString(), ""); - EXPECT_EQ(ecu_data["primary_ecu_serial"].asString(), conf.provision.primary_ecu_serial); -} - -/* - * Check that aktualizr does NOT change provisioning data if they DO exist - * already. 
- */ -TEST(Uptane, InitializeTwice) { - RecordProperty("zephyr_key", "OTA-983,TST-154"); - TemporaryDirectory temp_dir; - auto http = std::make_shared(temp_dir.Path()); - Config conf("tests/config/basic.toml"); - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = "testecuserial"; - - // First make sure nothing is already there. - auto storage = INvStorage::newStorage(conf.storage); - std::string pkey1; - std::string cert1; - std::string ca1; - EXPECT_FALSE(storage->loadTlsCreds(&ca1, &cert1, &pkey1)); - std::string public_key1; - std::string private_key1; - EXPECT_FALSE(storage->loadPrimaryKeys(&public_key1, &private_key1)); - - // Intialize and verify that the storage contains what we expect. - { - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - EXPECT_TRUE(storage->loadTlsCreds(&ca1, &cert1, &pkey1)); - EXPECT_NE(ca1, ""); - EXPECT_NE(cert1, ""); - EXPECT_NE(pkey1, ""); - EXPECT_TRUE(storage->loadPrimaryKeys(&public_key1, &private_key1)); - EXPECT_NE(public_key1, ""); - EXPECT_NE(private_key1, ""); - } - - // Intialize again and verify that nothing has changed. - { - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - std::string pkey2; - std::string cert2; - std::string ca2; - EXPECT_TRUE(storage->loadTlsCreds(&ca2, &cert2, &pkey2)); - std::string public_key2; - std::string private_key2; - EXPECT_TRUE(storage->loadPrimaryKeys(&public_key2, &private_key2)); - - EXPECT_EQ(cert1, cert2); - EXPECT_EQ(ca1, ca2); - EXPECT_EQ(pkey1, pkey2); - EXPECT_EQ(public_key1, public_key2); - EXPECT_EQ(private_key1, private_key2); - } -} - -/** - * Check that aktualizr does not generate a pet name when device ID is - * specified. 
- */ -TEST(Uptane, PetNameProvided) { - RecordProperty("zephyr_key", "OTA-985,TST-146"); - TemporaryDirectory temp_dir; - const std::string test_name = "test-name-123"; - - /* Make sure provided device ID is read as expected. */ - Config conf("tests/config/device_id.toml"); - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = "testecuserial"; - - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - { - EXPECT_EQ(conf.provision.device_id, test_name); - std::string devid; - EXPECT_TRUE(storage->loadDeviceId(&devid)); - EXPECT_EQ(devid, test_name); - } - - { - /* Make sure name is unchanged after re-initializing config. */ - conf.postUpdateValues(); - EXPECT_EQ(conf.provision.device_id, test_name); - std::string devid; - EXPECT_TRUE(storage->loadDeviceId(&devid)); - EXPECT_EQ(devid, test_name); - } -} - -/** - * Check that aktualizr generates a pet name if no device ID is specified. - */ -TEST(Uptane, PetNameCreation) { - RecordProperty("zephyr_key", "OTA-985,TST-145"); - TemporaryDirectory temp_dir; - - // Make sure name is created. 
- Config conf("tests/config/basic.toml"); - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = "testecuserial"; - boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir.Path() / "cred.zip"); - conf.provision.provision_path = temp_dir.Path() / "cred.zip"; - - std::string test_name1, test_name2; - { - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - EXPECT_TRUE(storage->loadDeviceId(&test_name1)); - EXPECT_NE(test_name1, ""); - } - - // Make sure a new name is generated if the config does not specify a name and - // there is no device_id file. - TemporaryDirectory temp_dir2; - { - conf.storage.path = temp_dir2.Path(); - boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir2.Path() / "cred.zip"); - conf.provision.device_id = ""; - - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir2.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - EXPECT_TRUE(storage->loadDeviceId(&test_name2)); - EXPECT_NE(test_name2, test_name1); - } - - // If the device_id is cleared in the config, but still present in the - // storage, re-initializing the config should read the device_id from storage. 
- { - conf.provision.device_id = ""; - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir2.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - std::string devid; - EXPECT_TRUE(storage->loadDeviceId(&devid)); - EXPECT_EQ(devid, test_name2); - } - - // If the device_id is removed from storage, but the field is still present in - // the config, re-initializing the config should still read the device_id from - // config. - { - TemporaryDirectory temp_dir3; - conf.storage.path = temp_dir3.Path(); - boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir3.Path() / "cred.zip"); - conf.provision.device_id = test_name2; - - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir3.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - std::string devid; - EXPECT_TRUE(storage->loadDeviceId(&devid)); - EXPECT_EQ(devid, test_name2); - } -} - -/* Detect and recover from failed provisioning. */ -TEST(Uptane, InitializeFail) { - TemporaryDirectory temp_dir; - auto http = std::make_shared(temp_dir.Path()); - Config conf("tests/config/basic.toml"); - conf.uptane.director_server = http->tls_server + "/director"; - conf.uptane.repo_server = http->tls_server + "/repo"; - conf.tls.server = http->tls_server; - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = "testecuserial"; - - auto storage = INvStorage::newStorage(conf.storage); - KeyManager keys(storage, conf.keymanagerConfig()); - - // Force a failure from the fake server. 
- { - http->provisioningResponse = ProvisioningResult::kFailure; - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_FALSE(initializer.isSuccessful()); - } - - // Don't force a failure and make sure it actually works this time. - { - http->provisioningResponse = ProvisioningResult::kOK; - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - } -} - -/** - * Verifies if the system hostname is used as a primary ECU hardware ID - * if it's not specified in the configuration - * - * Checks actions: - * - * - [x] Use the system hostname as hardware ID if one is not provided - */ -TEST(Uptane, HostnameAsHardwareID) { - TemporaryDirectory temp_dir; - Config conf("tests/config/basic.toml"); - conf.storage.path = temp_dir.Path(); - - boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir.Path() / "cred.zip"); - conf.provision.provision_path = temp_dir.Path() / "cred.zip"; - - { - auto storage = INvStorage::newStorage(conf.storage); - auto http = std::make_shared(temp_dir.Path()); - KeyManager keys(storage, conf.keymanagerConfig()); - - EXPECT_TRUE(conf.provision.primary_ecu_hardware_id.empty()); - Initializer initializer(conf.provision, storage, http, keys, {}); - EXPECT_TRUE(initializer.isSuccessful()); - - EcuSerials ecu_serials; - EXPECT_TRUE(storage->loadEcuSerials(&ecu_serials)); - EXPECT_GE(ecu_serials.size(), 1); - - // A second element of the first tuple in ECU Serials tuple array is a primary hardware ID. - // Each client of the storage class needs to know this information. - // If it changes then corresponding changes should be done in each storage client. 
- // perhaps it makes sense to introduce get/setPrimaryHardwareID method and incapsulate - // this tech info within storage (or maybe some other entity) - auto primaryHardwareID = ecu_serials[0].second; - auto hostname = Utils::getHostname(); - EXPECT_EQ(primaryHardwareID, Uptane::HardwareIdentifier(hostname)); - } -} - -#ifndef __NO_MAIN__ -int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); - logger_init(); - logger_set_threshold(boost::log::trivial::trace); - return RUN_ALL_TESTS(); -} -#endif diff --git a/src/libaktualizr/uptane/uptane_network_test.cc b/src/libaktualizr/uptane/uptane_network_test.cc index 6cf7e6eed8..8ca3d10cad 100644 --- a/src/libaktualizr/uptane/uptane_network_test.cc +++ b/src/libaktualizr/uptane/uptane_network_test.cc @@ -5,6 +5,7 @@ * network issues. */ #include +#include #include #include @@ -14,8 +15,9 @@ #include #include "http/httpclient.h" +#include "httpfake.h" #include "logging/logging.h" -#include "primary/initializer.h" +#include "primary/provisioner.h" #include "primary/sotauptaneclient.h" #include "storage/invstorage.h" #include "test_utils.h" @@ -26,11 +28,10 @@ Config conf("tests/config/basic.toml"); std::string port; -bool doTestInit(StorageType storage_type, const std::string &device_register_state, - const std::string &ecu_register_state) { +bool doTestInit(const std::string &device_register_state, const std::string &ecu_register_state) { LOG_INFO << "First attempt to initialize."; TemporaryDirectory temp_dir; - conf.storage.type = storage_type; + conf.storage.type = StorageType::kSqlite; conf.storage.path = temp_dir.Path(); conf.provision.expiry_days = device_register_state; conf.provision.primary_ecu_serial = ecu_register_state; @@ -41,11 +42,12 @@ bool doTestInit(StorageType storage_type, const std::string &device_register_sta bool result; auto http = std::make_shared(); + http->timeout(1000); auto store = INvStorage::newStorage(conf.storage); { - KeyManager keys(store, conf.keymanagerConfig()); - 
Initializer initializer(conf.provision, store, http, keys, {}); - result = initializer.isSuccessful(); + auto keys = std::make_shared(store, conf.keymanagerConfig()); + Provisioner provisioner(conf.provision, store, http, keys, {}); + result = provisioner.Attempt(); } if (device_register_state != "noerrors" || ecu_register_state != "noerrors") { EXPECT_FALSE(result); @@ -54,78 +56,70 @@ bool doTestInit(StorageType storage_type, const std::string &device_register_sta conf.provision.expiry_days = "noerrors"; conf.provision.primary_ecu_serial = "noerrors"; - if (device_register_state == "noerrors" && ecu_register_state != "noerrors") { - // restore a "good" ecu serial in the ecu register fault injection case - // (the bad value has been cached in storage) - EcuSerials serials; - store->loadEcuSerials(&serials); - serials[0].first = Uptane::EcuSerial(conf.provision.primary_ecu_serial); - store->storeEcuSerials(serials); - } - - KeyManager keys(store, conf.keymanagerConfig()); - Initializer initializer(conf.provision, store, http, keys, {}); - result = initializer.isSuccessful(); + auto keys = std::make_shared(store, conf.keymanagerConfig()); + Provisioner provisioner(conf.provision, store, http, keys, {}); + result = provisioner.Attempt(); } return result; } -TEST(UptaneNetwork, device_drop_request_sqlite) { +TEST(UptaneNetwork, DeviceDropRequest) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "drop_request", "noerrors")); + EXPECT_TRUE(doTestInit("drop_request", "noerrors")); } -TEST(UptaneNetwork, device_drop_body_sqlite) { +TEST(UptaneNetwork, DeviceDropBody) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "drop_body", "noerrors")); + EXPECT_TRUE(doTestInit("drop_body", "noerrors")); } -TEST(UptaneNetwork, device_503_sqlite) { +TEST(UptaneNetwork, Device503) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, 
"status_503", "noerrors")); + EXPECT_TRUE(doTestInit("status_503", "noerrors")); } -TEST(UptaneNetwork, device_408_sqlite) { +TEST(UptaneNetwork, Device408) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "status_408", "noerrors")); + EXPECT_TRUE(doTestInit("status_408", "noerrors")); } -TEST(UptaneNetwork, ecu_drop_request_sqlite) { +TEST(UptaneNetwork, EcuDropRequest) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "noerrors", "drop_request")); + EXPECT_TRUE(doTestInit("noerrors", "drop_request")); } -TEST(UptaneNetwork, ecu_503_sqlite) { +TEST(UptaneNetwork, Ecu503) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "noerrors", "status_503")); + EXPECT_TRUE(doTestInit("noerrors", "status_503")); } -TEST(UptaneNetwork, ecu_408_sqlite) { +TEST(UptaneNetwork, Ecu408) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "noerrors", "status_408")); + EXPECT_TRUE(doTestInit("noerrors", "status_408")); } -TEST(UptaneNetwork, no_connection_sqlite) { +TEST(UptaneNetwork, NoConnection) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "noconnection", "noerrors")); + + EXPECT_TRUE(doTestInit("noconnection", "noerrors")); } -TEST(UptaneNetwork, no_errors_sqlite) { +TEST(UptaneNetwork, NoErrors) { RecordProperty("zephyr_key", "OTA-991,TST-158"); - EXPECT_TRUE(doTestInit(StorageType::kSqlite, "noerrors", "noerrors")); + EXPECT_TRUE(doTestInit("noerrors", "noerrors")); } TEST(UptaneNetwork, DownloadFailure) { TemporaryDirectory temp_dir; conf.storage.path = temp_dir.Path(); + conf.pacman.images_path = temp_dir.Path() / "images"; conf.provision.expiry_days = "download_failure"; conf.provision.primary_ecu_serial = "download_failure"; conf.provision.primary_ecu_hardware_id = "hardware_id"; auto storage = INvStorage::newStorage(conf.storage); - 
auto http = std::make_shared(); - auto up = newTestClient(conf, storage, http); + auto up = std_::make_unique(conf, storage); EXPECT_NO_THROW(up->initialize()); Json::Value ot_json; @@ -140,6 +134,71 @@ TEST(UptaneNetwork, DownloadFailure) { EXPECT_TRUE(result.first); } +/* + * Output a log when connectivity is restored. + */ +class HttpUnstable : public HttpFake { + public: + explicit HttpUnstable(const boost::filesystem::path &test_dir_in) : HttpFake(test_dir_in, "hasupdates") {} + HttpResponse get(const std::string &url, int64_t maxsize) override { + if (!connectSwitch) { + return HttpResponse({}, 503, CURLE_OK, ""); + } else { + return HttpFake::get(url, maxsize); + } + } + + HttpResponse put(const std::string &url, const Json::Value &data) override { + if (!connectSwitch) { + (void)data; + return HttpResponse(url, 503, CURLE_OK, ""); + } else { + return HttpFake::put(url, data); + } + } + + HttpResponse post(const std::string &url, const Json::Value &data) override { + if (!connectSwitch) { + (void)data; + return HttpResponse(url, 503, CURLE_OK, ""); + } else { + return HttpFake::post(url, data); + } + } + + bool connectSwitch = true; +}; + +TEST(UptaneNetwork, LogConnectivityRestored) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config config = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + config.uptane.director_server = http->tls_server + "director"; + config.uptane.repo_server = http->tls_server + "repo"; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; + config.provision.primary_ecu_hardware_id = "primary_hw"; + config.storage.path = temp_dir.Path(); + config.tls.server = http->tls_server; + + auto storage = INvStorage::newStorage(config.storage); + auto up = std_::make_unique(config, storage, http); + EXPECT_NO_THROW(up->initialize()); + + result::UpdateCheck result = up->fetchMeta(); + EXPECT_EQ(result.status, 
result::UpdateStatus::kUpdatesAvailable); + + http->connectSwitch = false; + result = up->fetchMeta(); + EXPECT_EQ(result.status, result::UpdateStatus::kError); + + http->connectSwitch = true; + testing::internal::CaptureStdout(); + result = up->fetchMeta(); + EXPECT_EQ(result.status, result::UpdateStatus::kUpdatesAvailable); + EXPECT_NE(std::string::npos, testing::internal::GetCapturedStdout().find("Connectivity is restored.")); +} + #ifndef __NO_MAIN__ int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/src/libaktualizr/uptane/uptane_ostree_test.cc b/src/libaktualizr/uptane/uptane_ostree_test.cc new file mode 100644 index 0000000000..a0775a6085 --- /dev/null +++ b/src/libaktualizr/uptane/uptane_ostree_test.cc @@ -0,0 +1,63 @@ +#include + +#include + +#include "httpfake.h" +#include "libaktualizr/config.h" +#include "package_manager/ostreemanager.h" +#include "storage/invstorage.h" +#include "uptane_test_common.h" +#include "utilities/utils.h" + +boost::filesystem::path test_sysroot; + +TEST(UptaneOstree, InitialManifest) { + TemporaryDirectory temp_dir; + auto http = std::make_shared(temp_dir.Path()); + Config config("tests/config/basic.toml"); + config.pacman.type = PACKAGE_MANAGER_OSTREE; + config.pacman.sysroot = test_sysroot; + config.storage.path = temp_dir.Path(); + config.pacman.booted = BootedType::kStaged; + config.uptane.director_server = http->tls_server + "director"; + config.uptane.repo_server = http->tls_server + "repo"; + config.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; + config.provision.primary_ecu_hardware_id = "primary_hw"; + + auto storage = INvStorage::newStorage(config.storage); + auto sota_client = std_::make_unique(config, storage, http); + EXPECT_NO_THROW(sota_client->initialize()); + auto manifest = sota_client->AssembleManifest(); + // Fish the sha256 hash out of the manifest + auto installed_image = + 
manifest["ecu_version_manifests"][config.provision.primary_ecu_serial]["signed"]["installed_image"]; + std::string hash = installed_image["fileinfo"]["hashes"]["sha256"].asString(); + + // e3b0c442... is the sha256 hash of the empty string (i.e. echo -n | sha256sum) + EXPECT_NE(hash, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + << "should not be the hash of the empty string"; + + OstreeManager ostree(config.pacman, config.bootloader, storage, nullptr); + EXPECT_EQ(hash, ostree.getCurrentHash()); +} + +#ifndef __NO_MAIN__ +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + + if (argc != 2) { + std::cerr << "Error: " << argv[0] << " requires the path to an OSTree sysroot as an input argument.\n"; + return EXIT_FAILURE; + } + + TemporaryDirectory temp_sysroot; + test_sysroot = temp_sysroot / "sysroot"; + // uses cp, as boost doesn't like to copy bad symlinks + int r = system((std::string("cp -r ") + argv[1] + std::string(" ") + test_sysroot.string()).c_str()); + if (r != 0) { + return EXIT_FAILURE; + } + + return RUN_ALL_TESTS(); +} +#endif \ No newline at end of file diff --git a/src/libaktualizr/uptane/uptane_serial_test.cc b/src/libaktualizr/uptane/uptane_serial_test.cc index a229b90d19..c77905638f 100644 --- a/src/libaktualizr/uptane/uptane_serial_test.cc +++ b/src/libaktualizr/uptane/uptane_serial_test.cc @@ -21,15 +21,13 @@ namespace bpo = boost::program_options; -boost::filesystem::path build_dir; - /** - * Check that aktualizr generates random ecu_serial for primary and all - * secondaries. + * Check that aktualizr generates random ecu_serial for Primary and all + * Secondaries. */ TEST(Uptane, RandomSerial) { RecordProperty("zephyr_key", "OTA-989,TST-155"); - // Make two configs, neither of which specify a primary serial. + // Make two configs, neither of which specify a Primary serial. 
TemporaryDirectory temp_dir1, temp_dir2; Config conf_1("tests/config/basic.toml"); conf_1.storage.path = temp_dir1.Path(); @@ -48,19 +46,19 @@ TEST(Uptane, RandomSerial) { auto http1 = std::make_shared(temp_dir1.Path()); auto http2 = std::make_shared(temp_dir2.Path()); - auto uptane_client1 = UptaneTestCommon::newTestClient(conf_1, storage_1, http1); - EXPECT_NO_THROW(uptane_client1->initialize()); + auto uptane_client1 = std_::make_unique(conf_1, storage_1, http1); + ASSERT_NO_THROW(uptane_client1->initialize()); - auto uptane_client2 = UptaneTestCommon::newTestClient(conf_2, storage_2, http2); - EXPECT_NO_THROW(uptane_client2->initialize()); + auto uptane_client2 = std_::make_unique(conf_2, storage_2, http2); + ASSERT_NO_THROW(uptane_client2->initialize()); // Verify that none of the serials match. EcuSerials ecu_serials_1; EcuSerials ecu_serials_2; - EXPECT_TRUE(storage_1->loadEcuSerials(&ecu_serials_1)); - EXPECT_TRUE(storage_2->loadEcuSerials(&ecu_serials_2)); - EXPECT_EQ(ecu_serials_1.size(), 2); - EXPECT_EQ(ecu_serials_2.size(), 2); + ASSERT_TRUE(storage_1->loadEcuSerials(&ecu_serials_1)); + ASSERT_TRUE(storage_2->loadEcuSerials(&ecu_serials_2)); + ASSERT_EQ(ecu_serials_1.size(), 2); + ASSERT_EQ(ecu_serials_2.size(), 2); EXPECT_FALSE(ecu_serials_1[0].first.ToString().empty()); EXPECT_FALSE(ecu_serials_1[1].first.ToString().empty()); EXPECT_FALSE(ecu_serials_2[0].first.ToString().empty()); @@ -72,9 +70,9 @@ TEST(Uptane, RandomSerial) { } /** - * Check that aktualizr saves random ecu_serial for primary and all secondaries. + * Check that aktualizr saves random ecu_serial for Primary and all Secondaries. * - * Test with a virtual secondary. + * Test with a virtual Secondary. 
*/ TEST(Uptane, ReloadSerial) { RecordProperty("zephyr_key", "OTA-989,TST-156"); @@ -82,21 +80,20 @@ TEST(Uptane, ReloadSerial) { EcuSerials ecu_serials_1; EcuSerials ecu_serials_2; + Config conf("tests/config/basic.toml"); + conf.storage.path = temp_dir.Path(); + conf.provision.primary_ecu_serial = ""; + UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "", "secondary_hardware", false); + // Initialize. Should store new serials. { - Config conf("tests/config/basic.toml"); - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = ""; - auto storage = INvStorage::newStorage(conf.storage); auto http = std::make_shared(temp_dir.Path()); + auto uptane_client = std_::make_unique(conf, storage, http); - UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "", "secondary_hardware", false); - auto uptane_client = UptaneTestCommon::newTestClient(conf, storage, http); - - EXPECT_NO_THROW(uptane_client->initialize()); - EXPECT_TRUE(storage->loadEcuSerials(&ecu_serials_1)); - EXPECT_EQ(ecu_serials_1.size(), 2); + ASSERT_NO_THROW(uptane_client->initialize()); + ASSERT_TRUE(storage->loadEcuSerials(&ecu_serials_1)); + ASSERT_EQ(ecu_serials_1.size(), 2); EXPECT_FALSE(ecu_serials_1[0].first.ToString().empty()); EXPECT_FALSE(ecu_serials_1[1].first.ToString().empty()); } @@ -104,18 +101,13 @@ TEST(Uptane, ReloadSerial) { // Keep storage directory, but initialize new objects. Should load existing // serials. 
{ - Config conf("tests/config/basic.toml"); - conf.storage.path = temp_dir.Path(); - conf.provision.primary_ecu_serial = ""; - auto storage = INvStorage::newStorage(conf.storage); auto http = std::make_shared(temp_dir.Path()); - UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "", "secondary_hardware", false); - auto uptane_client = UptaneTestCommon::newTestClient(conf, storage, http); + auto uptane_client = std_::make_unique(conf, storage, http); - EXPECT_NO_THROW(uptane_client->initialize()); - EXPECT_TRUE(storage->loadEcuSerials(&ecu_serials_2)); - EXPECT_EQ(ecu_serials_2.size(), 2); + ASSERT_NO_THROW(uptane_client->initialize()); + ASSERT_TRUE(storage->loadEcuSerials(&ecu_serials_2)); + ASSERT_EQ(ecu_serials_2.size(), 2); EXPECT_FALSE(ecu_serials_2[0].first.ToString().empty()); EXPECT_FALSE(ecu_serials_2[1].first.ToString().empty()); } @@ -123,7 +115,7 @@ TEST(Uptane, ReloadSerial) { // Verify that serials match across initializations. EXPECT_EQ(ecu_serials_1[0].first, ecu_serials_2[0].first); EXPECT_EQ(ecu_serials_1[1].first, ecu_serials_2[1].first); - // Sanity check that primary and secondary serials do not match. + // Sanity check that Primary and Secondary serials do not match. 
EXPECT_NE(ecu_serials_1[0].first, ecu_serials_1[1].first); EXPECT_NE(ecu_serials_2[0].first, ecu_serials_2[1].first); } @@ -133,11 +125,6 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); logger_set_threshold(boost::log::trivial::trace); - if (argc != 2) { - std::cerr << "Error: " << argv[0] << " requires the path to the build directory as an input argument.\n"; - return EXIT_FAILURE; - } - build_dir = argv[1]; return RUN_ALL_TESTS(); } #endif diff --git a/src/libaktualizr/uptane/uptane_test.cc b/src/libaktualizr/uptane/uptane_test.cc index 0d1be6eda4..a33ef39a23 100644 --- a/src/libaktualizr/uptane/uptane_test.cc +++ b/src/libaktualizr/uptane/uptane_test.cc @@ -15,12 +15,13 @@ #include "crypto/p11engine.h" #include "httpfake.h" -#include "primary/initializer.h" +#include "libaktualizr/secondaryinterface.h" +#include "primary/provisioner.h" +#include "primary/provisioner_test_utils.h" #include "primary/sotauptaneclient.h" #include "storage/fsstorage_read.h" #include "storage/invstorage.h" #include "test_utils.h" -#include "uptane/secondaryinterface.h" #include "uptane/tuf.h" #include "uptane/uptanerepository.h" #include "uptane_test_common.h" @@ -52,7 +53,7 @@ TEST(Uptane, Verify) { Uptane::Root(Uptane::RepositoryType::Director(), response.getJson(), root); } -/* Throw an exception if a TUF root is unsigned. */ +/* Throw an exception if Root metadata is unsigned. */ TEST(Uptane, VerifyDataBad) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -69,7 +70,7 @@ TEST(Uptane, VerifyDataBad) { EXPECT_THROW(Uptane::Root(Uptane::RepositoryType::Director(), data_json, root), Uptane::UnmetThreshold); } -/* Throw an exception if a TUF root has unknown signature types. */ +/* Throw an exception if Root metadata has unknown signature types. 
*/ TEST(Uptane, VerifyDataUnknownType) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -87,7 +88,7 @@ TEST(Uptane, VerifyDataUnknownType) { EXPECT_THROW(Uptane::Root(Uptane::RepositoryType::Director(), data_json, root), Uptane::SecurityException); } -/* Throw an exception if a TUF root has invalid key IDs. */ +/* Throw an exception if Root metadata has invalid key IDs. */ TEST(Uptane, VerifyDataBadKeyId) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -105,7 +106,7 @@ TEST(Uptane, VerifyDataBadKeyId) { EXPECT_THROW(Uptane::Root(Uptane::RepositoryType::Director(), data_json, root), Uptane::BadKeyId); } -/* Throw an exception if a TUF root signature threshold is invalid. */ +/* Throw an exception if Root metadata signature threshold is invalid. */ TEST(Uptane, VerifyDataBadThreshold) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -126,8 +127,8 @@ TEST(Uptane, VerifyDataBadThreshold) { } } -/* Get manifest from primary. - * Get manifest from secondaries. */ +/* Get manifest from Primary. + * Get manifest from Secondaries. 
*/ TEST(Uptane, AssembleManifestGood) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -141,11 +142,11 @@ TEST(Uptane, AssembleManifestGood) { config.uptane.director_server = http->tls_server + "/director"; config.uptane.repo_server = http->tls_server + "/repo"; config.provision.primary_ecu_serial = "testecuserial"; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); Json::Value manifest = sota_client->AssembleManifest()["ecu_version_manifests"]; @@ -155,6 +156,13 @@ TEST(Uptane, AssembleManifestGood) { // Manifest should not have an installation result yet. EXPECT_FALSE(manifest["testecuserial"]["signed"].isMember("custom")); EXPECT_FALSE(manifest["secondary_ecu_serial"]["signed"].isMember("custom")); + + std::string counter_str = manifest["testecuserial"]["signed"]["report_counter"].asString(); + int64_t primary_ecu_report_counter = std::stoll(counter_str); + Json::Value manifest2 = sota_client->AssembleManifest()["ecu_version_manifests"]; + std::string counter_str2 = manifest2["testecuserial"]["signed"]["report_counter"].asString(); + int64_t primary_ecu_report_counter2 = std::stoll(counter_str2); + EXPECT_EQ(primary_ecu_report_counter2, primary_ecu_report_counter + 1); } /* Bad signatures are ignored when assembling the manifest. 
*/ @@ -171,19 +179,19 @@ TEST(Uptane, AssembleManifestBad) { config.uptane.director_server = http->tls_server + "/director"; config.uptane.repo_server = http->tls_server + "/repo"; config.provision.primary_ecu_serial = "testecuserial"; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; Primary::VirtualSecondaryConfig ecu_config = UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); - /* Overwrite the secondary's keys on disk. */ + /* Overwrite the Secondary's keys on disk. */ std::string private_key, public_key; - Crypto::generateKeyPair(ecu_config.key_type, &public_key, &private_key); + ASSERT_TRUE(Crypto::generateKeyPair(ecu_config.key_type, &public_key, &private_key)); Utils::writeFile(ecu_config.full_client_dir / ecu_config.ecu_private_key, private_key); public_key = Utils::readFile("tests/test_data/public.key"); Utils::writeFile(ecu_config.full_client_dir / ecu_config.ecu_public_key, public_key); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); Json::Value manifest = sota_client->AssembleManifest()["ecu_version_manifests"]; @@ -194,8 +202,8 @@ TEST(Uptane, AssembleManifestBad) { EXPECT_FALSE(manifest["secondary_ecu_serial"]["signed"].isMember("custom")); } -/* Get manifest from primary. - * Get manifest from secondaries. +/* Get manifest from Primary. + * Get manifest from Secondaries. * Send manifest to the server. 
*/ TEST(Uptane, PutManifest) { TemporaryDirectory temp_dir; @@ -203,19 +211,20 @@ TEST(Uptane, PutManifest) { Config config = config_common(); config.storage.path = temp_dir.Path(); boost::filesystem::copy_file("tests/test_data/cred.zip", (temp_dir / "cred.zip").string()); - boost::filesystem::copy_file("tests/test_data/firmware.txt", (temp_dir / "firmware.txt").string()); - boost::filesystem::copy_file("tests/test_data/firmware_name.txt", (temp_dir / "firmware_name.txt").string()); config.provision.provision_path = temp_dir / "cred.zip"; config.provision.mode = ProvisionMode::kSharedCred; config.uptane.director_server = http->tls_server + "/director"; config.uptane.repo_server = http->tls_server + "/repo"; config.provision.primary_ecu_serial = "testecuserial"; - config.pacman.type = PackageManager::kNone; - UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); + config.pacman.type = PACKAGE_MANAGER_NONE; + Primary::VirtualSecondaryConfig sec_config = + UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); + boost::filesystem::copy_file("tests/test_data/firmware.txt", sec_config.firmware_path); + boost::filesystem::copy_file("tests/test_data/firmware_name.txt", sec_config.target_name_path); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); EXPECT_TRUE(sota_client->putManifestSimple()); @@ -268,7 +277,7 @@ TEST(Uptane, PutManifestError) { std::function event)> f_cb = process_events_PutManifestError; events_channel->connect(f_cb); num_events_PutManifestError = 0; - auto sota_client = UptaneTestCommon::newTestClient(conf, storage, http, events_channel); + auto sota_client = std_::make_unique(conf, storage, http, events_channel); EXPECT_NO_THROW(sota_client->initialize()); auto result = 
sota_client->putManifest(); EXPECT_FALSE(result); @@ -293,7 +302,7 @@ TEST(Uptane, FetchMetaFail) { conf.tls.server = http->tls_server; auto storage = INvStorage::newStorage(conf.storage); - auto up = UptaneTestCommon::newTestClient(conf, storage, http); + auto up = std_::make_unique(conf, storage, http); EXPECT_NO_THROW(up->initialize()); result::UpdateCheck result = up->fetchMeta(); @@ -316,9 +325,12 @@ void process_events_Install(const std::shared_ptr &event) { auto concrete_event = std::static_pointer_cast(event); if (num_events_AllInstalls == 0) { EXPECT_TRUE(concrete_event->result.dev_report.isSuccess()); - } else { + } else if (num_events_AllInstalls == 1) { EXPECT_FALSE(concrete_event->result.dev_report.isSuccess()); EXPECT_EQ(concrete_event->result.dev_report.result_code, data::ResultCode::Numeric::kAlreadyProcessed); + } else { + EXPECT_FALSE(concrete_event->result.dev_report.isSuccess()); + EXPECT_EQ(concrete_event->result.dev_report.result_code, data::ResultCode::Numeric::kInternalError); } num_events_AllInstalls++; } @@ -328,11 +340,13 @@ void process_events_Install(const std::shared_ptr &event) { * Verify successful installation of a package. * * Identify ECU for each target. - * Check if there are updates to install for the primary. - * Install a binary update on the primary. - * Store installation result for primary. + * Check if there are updates to install for the Primary. + * Install a binary update on the Primary. + * Store installation result for Primary. * Store installation result for device. * Check if an update is already installed. + * Reject an update that matches the currently installed version's filename but + * not the length and/or hashes. 
*/ TEST(Uptane, InstallFakeGood) { Config conf("tests/config/basic.toml"); @@ -340,7 +354,8 @@ TEST(Uptane, InstallFakeGood) { auto http = std::make_shared(temp_dir.Path(), "hasupdates"); conf.uptane.director_server = http->tls_server + "director"; conf.uptane.repo_server = http->tls_server + "repo"; - conf.pacman.type = PackageManager::kNone; + conf.pacman.type = PACKAGE_MANAGER_NONE; + conf.pacman.images_path = temp_dir.Path() / "images"; conf.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; conf.provision.primary_ecu_hardware_id = "primary_hw"; conf.storage.path = temp_dir.Path(); @@ -352,7 +367,7 @@ TEST(Uptane, InstallFakeGood) { auto events_channel = std::make_shared(); std::function event)> f_cb = process_events_Install; events_channel->connect(f_cb); - auto up = UptaneTestCommon::newTestClient(conf, storage, http, events_channel); + auto up = std_::make_unique(conf, storage, http, events_channel); EXPECT_NO_THROW(up->initialize()); result::UpdateCheck update_result = up->fetchMeta(); @@ -383,7 +398,8 @@ TEST(Uptane, InstallFakeGood) { EXPECT_EQ(installation_report["items"][1]["result"]["success"].asBool(), true); EXPECT_EQ(installation_report["items"][1]["result"]["code"].asString(), "OK"); - // second install + // Second install to verify that we detect already installed updates + // correctly. result::Install install_result2 = up->uptaneInstall(download_result.updates); EXPECT_FALSE(install_result2.dev_report.isSuccess()); EXPECT_EQ(install_result2.dev_report.result_code, data::ResultCode::Numeric::kAlreadyProcessed); @@ -392,6 +408,28 @@ TEST(Uptane, InstallFakeGood) { manifest = up->AssembleManifest(); installation_report = manifest["installation_report"]["report"]; EXPECT_EQ(installation_report["result"]["success"].asBool(), false); + + // Recheck updates in order to repopulate Director Targets metadata in the + // database (it gets dropped after failure). 
+ result::UpdateCheck update_result2 = up->fetchMeta(); + EXPECT_EQ(update_result2.status, result::UpdateStatus::kNoUpdatesAvailable); + + // Remove the hashes from the current Target version stored in the database + // for the Primary. + boost::optional current_version; + EXPECT_TRUE(storage->loadInstalledVersions("CA:FE:A6:D2:84:9D", ¤t_version, nullptr)); + const auto bad_target = Uptane::Target(current_version->filename(), current_version->ecus(), std::vector{}, + current_version->length()); + storage->saveInstalledVersion("CA:FE:A6:D2:84:9D", bad_target, InstalledVersionUpdateMode::kCurrent); + + // Third install to verify that we reject updates with the same filename but + // different contents. + result::Install install_result3 = up->uptaneInstall(download_result.updates); + EXPECT_FALSE(install_result3.dev_report.isSuccess()); + EXPECT_EQ(install_result3.dev_report.result_code, data::ResultCode::Numeric::kInternalError); + EXPECT_THROW(std::rethrow_exception(up->getLastException()), Uptane::TargetContentMismatch); + EXPECT_EQ(num_events_InstallTarget, 2); + EXPECT_EQ(num_events_AllInstalls, 3); } /* @@ -404,7 +442,8 @@ TEST(Uptane, InstallFakeBad) { auto http = std::make_shared(temp_dir.Path(), "hasupdates"); conf.uptane.director_server = http->tls_server + "director"; conf.uptane.repo_server = http->tls_server + "repo"; - conf.pacman.type = PackageManager::kNone; + conf.pacman.type = PACKAGE_MANAGER_NONE; + conf.pacman.images_path = temp_dir.Path() / "images"; conf.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; conf.provision.primary_ecu_hardware_id = "primary_hw"; conf.storage.path = temp_dir.Path(); @@ -414,7 +453,7 @@ TEST(Uptane, InstallFakeBad) { auto storage = INvStorage::newStorage(conf.storage); std::function event)> f_cb = process_events_Install; - auto up = UptaneTestCommon::newTestClient(conf, storage, http, nullptr); + auto up = std_::make_unique(conf, storage, http); EXPECT_NO_THROW(up->initialize()); result::UpdateCheck update_result = 
up->fetchMeta(); @@ -424,29 +463,30 @@ TEST(Uptane, InstallFakeBad) { std::string hash = download_result.updates[0].sha256Hash(); std::transform(hash.begin(), hash.end(), hash.begin(), ::toupper); - boost::filesystem::path image = temp_dir / "images" / hash; + auto image = (conf.pacman.images_path / hash).string(); // Overwrite the file on disk with garbage so that the target verification // fails. First read the existing data so we can re-write it later. - auto rhandle = storage->openTargetFile(download_result.updates[0]); const uint64_t length = download_result.updates[0].length(); - uint8_t content[length]; - EXPECT_EQ(rhandle->rread(content, length), length); - rhandle->rclose(); - auto whandle = storage->allocateTargetFile(false, download_result.updates[0]); - uint8_t content_bad[length + 1]; + char content[length]; + auto r = std::ifstream(image, std::ios::binary); + r.read(content, static_cast(length)); + EXPECT_EQ(r.gcount(), length); + r.close(); + auto w = std::ofstream(image, std::ios::binary | std::ios::ate); + char content_bad[length + 1]; memset(content_bad, 0, length + 1); - EXPECT_EQ(whandle->wfeed(content_bad, 3), 3); - whandle->wcommit(); + w.write(content_bad, 3); + w.close(); result::Install install_result = up->uptaneInstall(download_result.updates); EXPECT_FALSE(install_result.dev_report.isSuccess()); EXPECT_EQ(install_result.dev_report.result_code, data::ResultCode::Numeric::kInternalError); // Try again with oversized data. 
- whandle = storage->allocateTargetFile(false, download_result.updates[0]); - EXPECT_EQ(whandle->wfeed(content_bad, length + 1), length + 1); - whandle->wcommit(); + w = std::ofstream(image, std::ios::binary | std::ios::ate); + w.write(content_bad, static_cast(length + 1)); + w.close(); install_result = up->uptaneInstall(download_result.updates); EXPECT_FALSE(install_result.dev_report.isSuccess()); @@ -454,27 +494,27 @@ TEST(Uptane, InstallFakeBad) { // Try again with equally long data to make sure the hash check actually gets // triggered. - whandle = storage->allocateTargetFile(false, download_result.updates[0]); - EXPECT_EQ(whandle->wfeed(content_bad, length), length); - whandle->wcommit(); + w = std::ofstream(image, std::ios::binary | std::ios::ate); + w.write(content_bad, static_cast(length)); + w.close(); install_result = up->uptaneInstall(download_result.updates); EXPECT_FALSE(install_result.dev_report.isSuccess()); EXPECT_EQ(install_result.dev_report.result_code, data::ResultCode::Numeric::kInternalError); // Try with the real data, but incomplete. - whandle = storage->allocateTargetFile(false, download_result.updates[0]); - EXPECT_EQ(whandle->wfeed(reinterpret_cast(content), length - 1), length - 1); - whandle->wcommit(); + w = std::ofstream(image, std::ios::binary | std::ios::ate); + w.write(content, static_cast(length - 1)); + w.close(); install_result = up->uptaneInstall(download_result.updates); EXPECT_FALSE(install_result.dev_report.isSuccess()); EXPECT_EQ(install_result.dev_report.result_code, data::ResultCode::Numeric::kInternalError); // Restore the original data to the file so that verification succeeds. 
- whandle = storage->allocateTargetFile(false, download_result.updates[0]); - EXPECT_EQ(whandle->wfeed(reinterpret_cast(content), length), length); - whandle->wcommit(); + w = std::ofstream(image, std::ios::binary | std::ios::ate); + w.write(content, static_cast(length)); + w.close(); install_result = up->uptaneInstall(download_result.updates); EXPECT_TRUE(install_result.dev_report.isSuccess()); @@ -499,17 +539,19 @@ class HttpFakeEvents : public HttpFake { } }; -class SecondaryInterfaceMock : public Uptane::SecondaryInterface { +class SecondaryInterfaceMock : public SecondaryInterface { public: explicit SecondaryInterfaceMock(Primary::VirtualSecondaryConfig &sconfig_in) : sconfig(std::move(sconfig_in)) { std::string private_key, public_key; - Crypto::generateKeyPair(sconfig.key_type, &public_key, &private_key); + if (!Crypto::generateKeyPair(sconfig.key_type, &public_key, &private_key)) { + throw std::runtime_error("Key generation failure"); + } public_key_ = PublicKey(public_key, sconfig.key_type); Json::Value manifest_unsigned; manifest_unsigned["key"] = "value"; std::string b64sig = Utils::toBase64( - Crypto::Sign(sconfig.key_type, nullptr, private_key, Json::FastWriter().write(manifest_unsigned))); + Crypto::Sign(sconfig.key_type, nullptr, private_key, Utils::jsonToCanonicalStr(manifest_unsigned))); Json::Value signature; signature["method"] = "rsassa-pss"; signature["sig"] = b64sig; @@ -517,41 +559,59 @@ class SecondaryInterfaceMock : public Uptane::SecondaryInterface { manifest_["signed"] = manifest_unsigned; manifest_["signatures"].append(signature); } - PublicKey getPublicKey() override { return public_key_; } + void init(std::shared_ptr secondary_provider_in) override { + secondary_provider_ = std::move(secondary_provider_in); + } + std::string Type() const override { return "mock"; } + PublicKey getPublicKey() const override { return public_key_; } - Uptane::HardwareIdentifier getHwId() override { return 
Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } - Uptane::EcuSerial getSerial() override { + Uptane::HardwareIdentifier getHwId() const override { return Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } + Uptane::EcuSerial getSerial() const override { if (!sconfig.ecu_serial.empty()) { return Uptane::EcuSerial(sconfig.ecu_serial); } return Uptane::EcuSerial(public_key_.KeyId()); } - Json::Value getManifest() override { return manifest_; } - MOCK_METHOD1(putMetadataMock, bool(const Uptane::RawMetaPack &)); - MOCK_METHOD1(getRootVersionMock, int32_t(bool)); + Uptane::Manifest getManifest() const override { return manifest_; } + bool ping() const override { return true; } + MOCK_METHOD(bool, putMetadataMock, (const Uptane::MetaBundle &)); + MOCK_METHOD(int32_t, getRootVersionMock, (bool), (const)); + + data::InstallationResult putMetadata(const Uptane::Target &target) override { + Uptane::MetaBundle meta_bundle; + if (!secondary_provider_->getMetadata(&meta_bundle, target)) { + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unable to load stored metadata from Primary"); + } + putMetadataMock(meta_bundle); + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + int32_t getRootVersion(bool director) const override { return getRootVersionMock(director); } - bool putMetadata(const Uptane::RawMetaPack &meta_pack) override { return putMetadataMock(meta_pack); } - int32_t getRootVersion(bool director) override { return getRootVersionMock(director); } + data::InstallationResult putRoot(const std::string &, bool) override { + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + virtual data::InstallationResult sendFirmware(const Uptane::Target &) override { + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } + virtual data::InstallationResult install(const Uptane::Target &) override { + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); + } - bool 
putRoot(const std::string &, bool) override { return true; } - bool sendFirmware(const std::shared_ptr &) override { return true; } + std::shared_ptr secondary_provider_; PublicKey public_key_; Json::Value manifest_; Primary::VirtualSecondaryConfig sconfig; }; -MATCHER_P(matchMeta, meta, "") { - return (arg.director_root == meta.director_root) && (arg.image_root == meta.image_root) && - (arg.director_targets == meta.director_targets) && (arg.image_timestamp == meta.image_timestamp) && - (arg.image_snapshot == meta.image_snapshot) && (arg.image_targets == meta.image_targets); -} +MATCHER_P(matchMeta, meta_bundle, "") { return (arg == meta_bundle); } /* - * Send metadata to secondary ECUs - * Send EcuInstallationStartedReport to server for secondaries + * Send metadata to Secondary ECUs + * Send EcuInstallationStartedReport to server for Secondaries */ -TEST(Uptane, SendMetadataToSeconadry) { +TEST(Uptane, SendMetadataToSecondary) { Config conf("tests/config/basic.toml"); TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path(), "hasupdates"); @@ -559,6 +619,7 @@ TEST(Uptane, SendMetadataToSeconadry) { conf.provision.primary_ecu_hardware_id = "primary_hw"; conf.uptane.director_server = http->tls_server + "/director"; conf.uptane.repo_server = http->tls_server + "/repo"; + conf.pacman.images_path = temp_dir.Path() / "images"; conf.storage.path = temp_dir.Path(); conf.tls.server = http->tls_server; @@ -575,21 +636,28 @@ TEST(Uptane, SendMetadataToSeconadry) { auto sec = std::make_shared(ecu_config); auto storage = INvStorage::newStorage(conf.storage); - auto up = UptaneTestCommon::newTestClient(conf, storage, http); - up->addNewSecondary(sec); + auto up = std_::make_unique(conf, storage, http); + up->addSecondary(sec); EXPECT_NO_THROW(up->initialize()); result::UpdateCheck update_result = up->fetchMeta(); EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); - Uptane::RawMetaPack meta; - storage->loadLatestRoot(&meta.director_root, 
Uptane::RepositoryType::Director()); - storage->loadNonRoot(&meta.director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); - storage->loadLatestRoot(&meta.image_root, Uptane::RepositoryType::Image()); - storage->loadNonRoot(&meta.image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); - storage->loadNonRoot(&meta.image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); - storage->loadNonRoot(&meta.image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); - - EXPECT_CALL(*sec, putMetadataMock(matchMeta(meta))); + Uptane::MetaBundle meta_bundle; + std::string metadata; + storage->loadLatestRoot(&metadata, Uptane::RepositoryType::Director()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Root()), metadata); + storage->loadNonRoot(&metadata, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Director(), Uptane::Role::Targets()), metadata); + storage->loadLatestRoot(&metadata, Uptane::RepositoryType::Image()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Root()), metadata); + storage->loadNonRoot(&metadata, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()), metadata); + storage->loadNonRoot(&metadata, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()), metadata); + storage->loadNonRoot(&metadata, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); + meta_bundle.emplace(std::make_pair(Uptane::RepositoryType::Image(), Uptane::Role::Targets()), metadata); + + EXPECT_CALL(*sec, putMetadataMock(matchMeta(meta_bundle))); result::Download download_result = up->downloadImages(update_result.updates); EXPECT_EQ(download_result.status, 
result::DownloadStatus::kSuccess); result::Install install_result = up->uptaneInstall(download_result.updates); @@ -598,7 +666,7 @@ TEST(Uptane, SendMetadataToSeconadry) { EXPECT_TRUE(EcuInstallationStartedReportGot); } -/* Register secondary ECUs with director. */ +/* Register Secondary ECUs with Director. */ TEST(Uptane, UptaneSecondaryAdd) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -611,15 +679,15 @@ TEST(Uptane, UptaneSecondaryAdd) { config.tls.server = http->tls_server; config.provision.primary_ecu_serial = "testecuserial"; config.storage.path = temp_dir.Path(); - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); /* Verify the correctness of the metadata sent to the server about the - * secondary. */ + * Secondary. 
*/ Json::Value ecu_data = Utils::parseJSONFile(temp_dir / "post.json"); EXPECT_EQ(ecu_data["ecus"].size(), 2); EXPECT_EQ(ecu_data["primary_ecu_serial"].asString(), config.provision.primary_ecu_serial); @@ -629,7 +697,7 @@ TEST(Uptane, UptaneSecondaryAdd) { EXPECT_TRUE(ecu_data["ecus"][1]["clientKey"]["keyval"]["public"].asString().size() > 0); } -/* Adding multiple secondaries with the same serial throws an error */ +/* Adding multiple Secondaries with the same serial throws an error */ TEST(Uptane, UptaneSecondaryAddSameSerial) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); @@ -637,86 +705,19 @@ TEST(Uptane, UptaneSecondaryAddSameSerial) { Config config = config_common(); config.provision.provision_path = temp_dir / "cred.zip"; config.provision.mode = ProvisionMode::kSharedCred; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; config.storage.path = temp_dir.Path(); UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware_new"); - EXPECT_THROW(sota_client->addNewSecondary(std::make_shared( + EXPECT_THROW(sota_client->addSecondary(std::make_shared( Primary::VirtualSecondaryConfig::create_from_file(config.uptane.secondary_config_file)[0])), std::runtime_error); } -/* - * Identify previously unknown secondaries - * Identify currently unavailable secondaries - */ -TEST(Uptane, UptaneSecondaryMisconfigured) { - TemporaryDirectory temp_dir; - boost::filesystem::copy_file("tests/test_data/cred.zip", temp_dir / "cred.zip"); - auto http = std::make_shared(temp_dir.Path()); - { - Config config = config_common(); - config.provision.provision_path = temp_dir / 
"cred.zip"; - config.provision.mode = ProvisionMode::kSharedCred; - config.pacman.type = PackageManager::kNone; - config.storage.path = temp_dir.Path(); - UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); - - auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - EXPECT_NO_THROW(sota_client->initialize()); - - std::vector ecus; - storage->loadMisconfiguredEcus(&ecus); - EXPECT_EQ(ecus.size(), 0); - } - { - Config config = config_common(); - config.provision.provision_path = temp_dir / "cred.zip"; - config.provision.mode = ProvisionMode::kSharedCred; - config.pacman.type = PackageManager::kNone; - config.storage.path = temp_dir.Path(); - auto storage = INvStorage::newStorage(config.storage); - UptaneTestCommon::addDefaultSecondary(config, temp_dir, "new_secondary_ecu_serial", "new_secondary_hardware"); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - EXPECT_NO_THROW(sota_client->initialize()); - - std::vector ecus; - storage->loadMisconfiguredEcus(&ecus); - EXPECT_EQ(ecus.size(), 2); - if (ecus[0].serial.ToString() == "new_secondary_ecu_serial") { - EXPECT_EQ(ecus[0].state, EcuState::kNotRegistered); - EXPECT_EQ(ecus[1].serial.ToString(), "secondary_ecu_serial"); - EXPECT_EQ(ecus[1].state, EcuState::kOld); - } else if (ecus[0].serial.ToString() == "secondary_ecu_serial") { - EXPECT_EQ(ecus[0].state, EcuState::kOld); - EXPECT_EQ(ecus[1].serial.ToString(), "new_secondary_ecu_serial"); - EXPECT_EQ(ecus[1].state, EcuState::kNotRegistered); - } else { - FAIL() << "Unexpected secondary serial in storage: " << ecus[0].serial.ToString(); - } - } - { - Config config = config_common(); - config.provision.provision_path = temp_dir / "cred.zip"; - config.provision.mode = ProvisionMode::kSharedCred; - config.pacman.type = PackageManager::kNone; - config.storage.path = temp_dir.Path(); - auto storage = 
INvStorage::newStorage(config.storage); - UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hardware"); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); - EXPECT_NO_THROW(sota_client->initialize()); - - std::vector ecus; - storage->loadMisconfiguredEcus(&ecus); - EXPECT_EQ(ecus.size(), 0); - } -} - /** * Check that basic device info sent by aktualizr during provisioning matches * our expectations. @@ -726,8 +727,24 @@ TEST(Uptane, UptaneSecondaryMisconfigured) { */ class HttpFakeProv : public HttpFake { public: - HttpFakeProv(const boost::filesystem::path &test_dir_in, std::string flavor = "") - : HttpFake(test_dir_in, std::move(flavor)) {} + HttpFakeProv(const boost::filesystem::path &test_dir_in, std::string flavor, Config &config_in) + : HttpFake(test_dir_in, std::move(flavor)), config(config_in) {} + + HttpResponse post(const std::string &url, const std::string &content_type, const std::string &data) override { + std::cout << "post " << url << "\n"; + + if (url.find("/system_info/config") != std::string::npos) { + /* Send libaktualizr configuration to the server. */ + config_count++; + std::stringstream conf_ss; + config.writeToStream(conf_ss); + EXPECT_EQ(data, conf_ss.str()); + EXPECT_EQ(content_type, "application/toml"); + } else { + EXPECT_EQ(0, 1) << "Unexpected post to URL: " << url; + } + return HttpFake::post(url, content_type, data); + } HttpResponse post(const std::string &url, const Json::Value &data) override { std::cout << "post " << url << "\n"; @@ -737,7 +754,7 @@ class HttpFakeProv : public HttpFake { EXPECT_EQ(data["deviceId"].asString(), "tst149_device_id"); return HttpResponse(Utils::readFile("tests/test_data/cred.p12"), 200, CURLE_OK, ""); } else if (url.find("/director/ecus") != std::string::npos) { - /* Register primary ECU with director. */ + /* Register Primary ECU with Director. 
*/ ecus_count++; EXPECT_EQ(data["primary_ecu_serial"].asString(), "CA:FE:A6:D2:84:9D"); EXPECT_EQ(data["ecus"][0]["hardware_identifier"].asString(), "primary_hw"); @@ -754,13 +771,12 @@ class HttpFakeProv : public HttpFake { return HttpResponse("", 400, CURLE_OK, ""); } - HttpResponse handle_event(const std::string &url, const Json::Value &data) override { - (void)url; - if (data[0]["eventType"]["id"] == "DownloadProgressReport") { + HttpResponse handle_event_single(const Json::Value &event) { + if (event["eventType"]["id"] == "DownloadProgressReport") { return HttpResponse("", 200, CURLE_OK, ""); } - const std::string event_type = data[0]["eventType"]["id"].asString(); - const std::string serial = data[0]["event"]["ecu"].asString(); + const std::string event_type = event["eventType"]["id"].asString(); + const std::string serial = event["event"]["ecu"].asString(); std::cout << "Got " << event_type << " event\n"; ++events_seen; switch (events_seen) { @@ -792,12 +808,12 @@ class HttpFakeProv : public HttpFake { } break; case 5: - /* Send EcuInstallationStartedReport to server for primary. */ + /* Send EcuInstallationStartedReport to server for Primary. */ EXPECT_EQ(event_type, "EcuInstallationStarted"); EXPECT_EQ(serial, "CA:FE:A6:D2:84:9D"); break; case 6: - /* Send EcuInstallationCompletedReport to server for primary. */ + /* Send EcuInstallationCompletedReport to server for Primary. 
*/ EXPECT_EQ(event_type, "EcuInstallationCompleted"); EXPECT_EQ(serial, "CA:FE:A6:D2:84:9D"); break; @@ -818,6 +834,14 @@ class HttpFakeProv : public HttpFake { return HttpResponse("", 200, CURLE_OK, ""); } + HttpResponse handle_event(const std::string &url, const Json::Value &data) override { + (void)url; + for (const Json::Value &ev : data) { + handle_event_single(ev); + } + return HttpResponse("", 200, CURLE_OK, ""); + } + HttpResponse put(const std::string &url, const Json::Value &data) override { std::cout << "put " << url << "\n"; if (url.find("core/installed") != std::string::npos) { @@ -826,29 +850,21 @@ class HttpFakeProv : public HttpFake { EXPECT_EQ(data.size(), 1); EXPECT_EQ(data[0]["name"].asString(), "fake-package"); EXPECT_EQ(data[0]["version"].asString(), "1.0"); - } else if (url.find("/core/system_info") != std::string::npos) { - /* Send hardware info to the server. */ - system_info_count++; - Json::Value hwinfo = Utils::getHardwareInfo(); - EXPECT_EQ(hwinfo["id"].asString(), data["id"].asString()); - EXPECT_EQ(hwinfo["description"].asString(), data["description"].asString()); - EXPECT_EQ(hwinfo["class"].asString(), data["class"].asString()); - EXPECT_EQ(hwinfo["product"].asString(), data["product"].asString()); } else if (url.find("/director/manifest") != std::string::npos) { - /* Get manifest from primary. - * Get primary installation result. + /* Get manifest from Primary. + * Get Primary installation result. * Send manifest to the server. */ manifest_count++; std::string file_primary; std::string file_secondary; std::string hash_primary; std::string hash_secondary; - if (manifest_count <= 2) { + if (manifest_count <= 1) { file_primary = "unknown"; file_secondary = "noimage"; // Check for default initial value of packagemanagerfake. 
- hash_primary = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(""))); - hash_secondary = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(""))); + hash_primary = Crypto::sha256digestHex(""); + hash_secondary = Crypto::sha256digestHex(""); } else { file_primary = "primary_firmware.txt"; file_secondary = "secondary_firmware.txt"; @@ -871,6 +887,18 @@ class HttpFakeProv : public HttpFake { EXPECT_EQ(nwinfo["local_ipv4"].asString(), data["local_ipv4"].asString()); EXPECT_EQ(nwinfo["mac"].asString(), data["mac"].asString()); EXPECT_EQ(nwinfo["hostname"].asString(), data["hostname"].asString()); + } else if (url.find("/system_info") != std::string::npos) { + /* Send hardware info to the server. */ + system_info_count++; + if (system_info_count <= 2) { + Json::Value hwinfo = Utils::getHardwareInfo(); + EXPECT_EQ(hwinfo["id"].asString(), data["id"].asString()); + EXPECT_EQ(hwinfo["description"].asString(), data["description"].asString()); + EXPECT_EQ(hwinfo["class"].asString(), data["class"].asString()); + EXPECT_EQ(hwinfo["product"].asString(), data["product"].asString()); + } else { + EXPECT_EQ(custom_hw_info, data); + } } else { EXPECT_EQ(0, 1) << "Unexpected put to URL: " << url; } @@ -885,8 +913,11 @@ class HttpFakeProv : public HttpFake { int installed_count{0}; int system_info_count{0}; int network_count{0}; + int config_count{0}; + Json::Value custom_hw_info; private: + Config &config; int primary_download_start{0}; int primary_download_complete{0}; int secondary_download_start{0}; @@ -902,7 +933,7 @@ TEST(Uptane, ProvisionOnServer) { RecordProperty("zephyr_key", "OTA-984,TST-149"); TemporaryDirectory temp_dir; Config config("tests/config/basic.toml"); - auto http = std::make_shared(temp_dir.Path(), "hasupdates"); + auto http = std::make_shared(temp_dir.Path(), "hasupdates", config); const std::string &server = http->tls_server; config.provision.server = server; config.tls.server = server; @@ -912,12 +943,14 @@ 
TEST(Uptane, ProvisionOnServer) { config.provision.device_id = "tst149_device_id"; config.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; config.provision.primary_ecu_hardware_id = "primary_hw"; + config.pacman.images_path = temp_dir.Path() / "images"; config.storage.path = temp_dir.Path(); UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hw"); + logger_set_threshold(boost::log::trivial::trace); auto storage = INvStorage::newStorage(config.storage); auto events_channel = std::make_shared(); - auto up = UptaneTestCommon::newTestClient(config, storage, http, events_channel); + auto up = std_::make_unique(config, storage, http, events_channel); EXPECT_EQ(http->devices_count, 0); EXPECT_EQ(http->ecus_count, 0); @@ -925,6 +958,7 @@ TEST(Uptane, ProvisionOnServer) { EXPECT_EQ(http->installed_count, 0); EXPECT_EQ(http->system_info_count, 0); EXPECT_EQ(http->network_count, 0); + EXPECT_EQ(http->config_count, 0); EXPECT_NO_THROW(up->initialize()); EcuSerials serials; @@ -935,14 +969,14 @@ TEST(Uptane, ProvisionOnServer) { EXPECT_EQ(http->ecus_count, 1); EXPECT_NO_THROW(up->sendDeviceData()); - EXPECT_EQ(http->manifest_count, 1); EXPECT_EQ(http->installed_count, 1); EXPECT_EQ(http->system_info_count, 1); EXPECT_EQ(http->network_count, 1); + EXPECT_EQ(http->config_count, 1); result::UpdateCheck update_result = up->fetchMeta(); EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); - EXPECT_EQ(http->manifest_count, 2); + EXPECT_EQ(http->manifest_count, 1); // Test installation to make sure the metadata put to the server is correct. 
result::Download download_result = up->downloadImages(update_result.updates); @@ -954,10 +988,48 @@ TEST(Uptane, ProvisionOnServer) { EXPECT_EQ(http->devices_count, 1); EXPECT_EQ(http->ecus_count, 1); - EXPECT_EQ(http->manifest_count, 3); + EXPECT_EQ(http->manifest_count, 2); EXPECT_EQ(http->installed_count, 1); EXPECT_EQ(http->system_info_count, 1); EXPECT_EQ(http->network_count, 1); + EXPECT_EQ(http->config_count, 1); + + // Try sending device data again to confirm that it isn't resent if it hasn't + // changed (and hardware info is only sent once). + up->setCustomHardwareInfo(http->custom_hw_info); + EXPECT_NO_THROW(up->sendDeviceData()); + EXPECT_EQ(http->installed_count, 1); + EXPECT_EQ(http->system_info_count, 1); + EXPECT_EQ(http->network_count, 1); + EXPECT_EQ(http->config_count, 1); + + // Clear the stored values and resend to verify the data is resent. + storage->clearDeviceData(); + EXPECT_NO_THROW(up->sendDeviceData()); + EXPECT_EQ(http->installed_count, 2); + EXPECT_EQ(http->system_info_count, 2); + EXPECT_EQ(http->network_count, 2); + EXPECT_EQ(http->config_count, 2); + + // Set hardware info to a custom value and send device data again. + http->custom_hw_info["hardware"] = "test-hw"; + up->setCustomHardwareInfo(http->custom_hw_info); + EXPECT_NO_THROW(up->sendDeviceData()); + EXPECT_EQ(http->installed_count, 2); + EXPECT_EQ(http->system_info_count, 3); + EXPECT_EQ(http->network_count, 2); + EXPECT_EQ(http->config_count, 2); + + // Try once again; nothing should be resent. 
+ EXPECT_NO_THROW(up->sendDeviceData()); + EXPECT_EQ(http->installed_count, 2); + EXPECT_EQ(http->system_info_count, 3); + EXPECT_EQ(http->network_count, 2); + EXPECT_EQ(http->config_count, 2); + + // Report Queue is asynchronous, so we cannot be sure + // that it is flashed until it was destroyed + up.reset(); EXPECT_EQ(http->events_seen, 8); } @@ -1002,17 +1074,17 @@ TEST(Uptane, FsToSqlFull) { std::string director_root; std::string director_targets; - std::string images_root; - std::string images_targets; - std::string images_timestamp; - std::string images_snapshot; + std::string image_root; + std::string image_targets; + std::string image_timestamp; + std::string image_snapshot; EXPECT_TRUE(fs_storage.loadLatestRoot(&director_root, Uptane::RepositoryType::Director())); EXPECT_TRUE(fs_storage.loadNonRoot(&director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); - EXPECT_TRUE(fs_storage.loadLatestRoot(&images_root, Uptane::RepositoryType::Image())); - EXPECT_TRUE(fs_storage.loadNonRoot(&images_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); - EXPECT_TRUE(fs_storage.loadNonRoot(&images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); - EXPECT_TRUE(fs_storage.loadNonRoot(&images_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); + EXPECT_TRUE(fs_storage.loadLatestRoot(&image_root, Uptane::RepositoryType::Image())); + EXPECT_TRUE(fs_storage.loadNonRoot(&image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); + EXPECT_TRUE(fs_storage.loadNonRoot(&image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); + EXPECT_TRUE(fs_storage.loadNonRoot(&image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); EXPECT_TRUE(boost::filesystem::exists(config.uptane_public_key_path.get(config.path))); EXPECT_TRUE(boost::filesystem::exists(config.uptane_private_key_path.get(config.path))); @@ -1075,17 +1147,17 @@ TEST(Uptane, FsToSqlFull) 
{ std::string sql_director_root; std::string sql_director_targets; - std::string sql_images_root; - std::string sql_images_targets; - std::string sql_images_timestamp; - std::string sql_images_snapshot; + std::string sql_image_root; + std::string sql_image_targets; + std::string sql_image_timestamp; + std::string sql_image_snapshot; sql_storage->loadLatestRoot(&sql_director_root, Uptane::RepositoryType::Director()); sql_storage->loadNonRoot(&sql_director_targets, Uptane::RepositoryType::Director(), Uptane::Role::Targets()); - sql_storage->loadLatestRoot(&sql_images_root, Uptane::RepositoryType::Image()); - sql_storage->loadNonRoot(&sql_images_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); - sql_storage->loadNonRoot(&sql_images_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); - sql_storage->loadNonRoot(&sql_images_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); + sql_storage->loadLatestRoot(&sql_image_root, Uptane::RepositoryType::Image()); + sql_storage->loadNonRoot(&sql_image_targets, Uptane::RepositoryType::Image(), Uptane::Role::Targets()); + sql_storage->loadNonRoot(&sql_image_timestamp, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp()); + sql_storage->loadNonRoot(&sql_image_snapshot, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot()); EXPECT_EQ(sql_public_key, public_key); EXPECT_EQ(sql_private_key, private_key); @@ -1099,10 +1171,10 @@ TEST(Uptane, FsToSqlFull) { EXPECT_EQ(sql_director_root, director_root); EXPECT_EQ(sql_director_targets, director_targets); - EXPECT_EQ(sql_images_root, images_root); - EXPECT_EQ(sql_images_targets, images_targets); - EXPECT_EQ(sql_images_timestamp, images_timestamp); - EXPECT_EQ(sql_images_snapshot, images_snapshot); + EXPECT_EQ(sql_image_root, image_root); + EXPECT_EQ(sql_image_targets, image_targets); + EXPECT_EQ(sql_image_timestamp, image_timestamp); + EXPECT_EQ(sql_image_snapshot, image_snapshot); } /* Import a list of installed packages 
into the storage. */ @@ -1174,8 +1246,6 @@ class HttpFakeUnstable : public HttpFake { HttpFakeUnstable(const boost::filesystem::path &test_dir_in) : HttpFake(test_dir_in, "hasupdates") {} HttpResponse get(const std::string &url, int64_t maxsize) override { if (unstable_valid_count >= unstable_valid_num) { - ++unstable_valid_num; - unstable_valid_count = 0; return HttpResponse({}, 503, CURLE_OK, ""); } else { ++unstable_valid_count; @@ -1183,22 +1253,30 @@ class HttpFakeUnstable : public HttpFake { } } + void setUnstableValidNum(int num) { + unstable_valid_num = num; + unstable_valid_count = 0; + } + int unstable_valid_num{0}; int unstable_valid_count{0}; }; /* Recover from an interrupted Uptane iteration. - * Fetch metadata from the director. - * Check metadata from the director. + * Fetch metadata from the Director. + * Check metadata from the Director. * Identify targets for known ECUs. - * Fetch metadata from the images repo. - * Check metadata from the images repo. */ + * Fetch metadata from the Image repo. + * Check metadata from the Image repo. + * + * This is a bit fragile because it depends upon a precise number of HTTP get + * requests being made. If that changes, this test will need to be adjusted. 
*/ TEST(Uptane, restoreVerify) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path()); Config config("tests/config/basic.toml"); config.storage.path = temp_dir.Path(); - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; config.uptane.director_server = http->tls_server + "director"; config.uptane.repo_server = http->tls_server + "repo"; config.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; @@ -1207,50 +1285,56 @@ TEST(Uptane, restoreVerify) { config.postUpdateValues(); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); sota_client->AssembleManifest(); // 1st attempt, don't get anything - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_FALSE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())); - // 2nd attempt, get director root.json - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + // 2nd attempt, get Director root.json + http->setUnstableValidNum(1); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_TRUE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())); EXPECT_FALSE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Director(), Uptane::Role::Targets())); - // 3rd attempt, get director targets.json - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + // 3rd attempt, get Director targets.json + http->setUnstableValidNum(2); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_TRUE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Director())); EXPECT_TRUE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Director(), 
Uptane::Role::Targets())); EXPECT_FALSE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Image())); - // 4th attempt, get images root.json - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + // 4th attempt, get Image repo root.json + http->setUnstableValidNum(3); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_TRUE(storage->loadLatestRoot(nullptr, Uptane::RepositoryType::Image())); EXPECT_FALSE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); - // 5th attempt, get images timestamp.json - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + // 5th attempt, get Image repo timestamp.json + http->setUnstableValidNum(4); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_TRUE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Timestamp())); EXPECT_FALSE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); - // 6th attempt, get images snapshot.json - EXPECT_FALSE(sota_client->uptaneIteration(nullptr, nullptr)); + // 6th attempt, get Image repo snapshot.json + http->setUnstableValidNum(5); + EXPECT_THROW(sota_client->uptaneIteration(nullptr, nullptr), Uptane::MetadataFetchFailure); EXPECT_TRUE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Snapshot())); EXPECT_FALSE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); - // 7th attempt, get images targets.json, successful iteration - EXPECT_TRUE(sota_client->uptaneIteration(nullptr, nullptr)); + // 7th attempt, get Image repo targets.json, successful iteration + http->setUnstableValidNum(6); + EXPECT_NO_THROW(sota_client->uptaneIteration(nullptr, nullptr)); EXPECT_TRUE(storage->loadNonRoot(nullptr, Uptane::RepositoryType::Image(), Uptane::Role::Targets())); } -/* Fetch metadata from the director. 
- * Check metadata from the director. +/* Fetch metadata from the Director. + * Check metadata from the Director. * Identify targets for known ECUs. - * Fetch metadata from the images repo. - * Check metadata from the images repo. */ + * Fetch metadata from the Image repo. + * Check metadata from the Image repo. */ TEST(Uptane, offlineIteration) { TemporaryDirectory temp_dir; auto http = std::make_shared(temp_dir.Path(), "hasupdates"); @@ -1258,21 +1342,21 @@ TEST(Uptane, offlineIteration) { config.storage.path = temp_dir.Path(); config.uptane.director_server = http->tls_server + "director"; config.uptane.repo_server = http->tls_server + "repo"; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; config.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; config.provision.primary_ecu_hardware_id = "primary_hw"; UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hw"); config.postUpdateValues(); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); std::vector targets_online; - EXPECT_TRUE(sota_client->uptaneIteration(&targets_online, nullptr)); + EXPECT_NO_THROW(sota_client->uptaneIteration(&targets_online, nullptr)); std::vector targets_offline; - EXPECT_TRUE(sota_client->uptaneOfflineIteration(&targets_offline, nullptr)); + EXPECT_NO_THROW(sota_client->uptaneOfflineIteration(&targets_offline, nullptr)); EXPECT_TRUE(Uptane::MatchTargetVector(targets_online, targets_offline)); } @@ -1287,30 +1371,21 @@ TEST(Uptane, IgnoreUnknownUpdate) { config.storage.path = temp_dir.Path(); config.uptane.director_server = http->tls_server + "director"; config.uptane.repo_server = http->tls_server + "repo"; - config.pacman.type = PackageManager::kNone; + config.pacman.type = PACKAGE_MANAGER_NONE; 
config.provision.primary_ecu_serial = "primary_ecu"; config.provision.primary_ecu_hardware_id = "primary_hw"; UptaneTestCommon::addDefaultSecondary(config, temp_dir, "secondary_ecu_serial", "secondary_hw"); config.postUpdateValues(); auto storage = INvStorage::newStorage(config.storage); - auto sota_client = UptaneTestCommon::newTestClient(config, storage, http); + auto sota_client = std_::make_unique(config, storage, http); EXPECT_NO_THROW(sota_client->initialize()); + auto result = sota_client->fetchMeta(); EXPECT_EQ(result.status, result::UpdateStatus::kError); - EXPECT_STREQ(sota_client->getLastException().what(), - "The target had an ECU ID that did not match the client's configured ECU id."); - sota_client->last_exception = Uptane::Exception{"", ""}; - result = sota_client->checkUpdates(); - EXPECT_EQ(result.status, result::UpdateStatus::kError); - EXPECT_STREQ(sota_client->getLastException().what(), - "The target had an ECU ID that did not match the client's configured ECU id."); std::vector packages_to_install = UptaneTestCommon::makePackage("testecuserial", "testecuhwid"); - sota_client->last_exception = Uptane::Exception{"", ""}; auto report = sota_client->uptaneInstall(packages_to_install); - EXPECT_STREQ(sota_client->getLastException().what(), - "The target had an ECU ID that did not match the client's configured ECU id."); EXPECT_EQ(report.ecu_reports.size(), 0); } @@ -1324,10 +1399,11 @@ TEST(Uptane, Pkcs11Provision) { config.tls.pkey_source = CryptoSource::kPkcs11; config.p11.module = TEST_PKCS11_MODULE_PATH; config.p11.pass = "1234"; + config.p11.label = "Virtual token"; config.p11.tls_clientcert_id = "01"; config.p11.tls_pkey_id = "02"; config.import.base_path = (temp_dir / "import").string(); - config.import.tls_cacert_path = BasedPath("root.crt"); + config.import.tls_cacert_path = utils::BasedPath("root.crt"); config.storage.path = temp_dir.Path(); config.postUpdateValues(); @@ -1335,10 +1411,9 @@ TEST(Uptane, Pkcs11Provision) { auto storage = 
INvStorage::newStorage(config.storage); storage->importData(config.import); auto http = std::make_shared(temp_dir.Path(), "hasupdates"); - KeyManager keys(storage, config.keymanagerConfig()); - Initializer initializer(config.provision, storage, http, keys, {}); + auto keys = std::make_shared(storage, config.keymanagerConfig()); - EXPECT_TRUE(initializer.isSuccessful()); + ExpectProvisionOK(Provisioner(config.provision, storage, http, keys, {})); } #endif diff --git a/src/libaktualizr/uptane/uptanerepository.cc b/src/libaktualizr/uptane/uptanerepository.cc index 6602eb55cf..32b6a6fdad 100644 --- a/src/libaktualizr/uptane/uptanerepository.cc +++ b/src/libaktualizr/uptane/uptanerepository.cc @@ -1,55 +1,91 @@ #include "uptane/uptanerepository.h" -#include - -#include -#include -#include -#include -#include #include -#include -#include "bootstrap/bootstrap.h" -#include "crypto/crypto.h" -#include "crypto/openssl_compat.h" +#include "fetcher.h" #include "logging/logging.h" #include "storage/invstorage.h" +#include "uptane/exceptions.h" #include "utilities/utils.h" namespace Uptane { -bool RepositoryCommon::initRoot(const std::string& root_raw) { +const std::string RepositoryType::DIRECTOR = "director"; +const std::string RepositoryType::IMAGE = "image"; + +void RepositoryCommon::initRoot(RepositoryType repo_type, const std::string& root_raw) { try { root = Root(type, Utils::parseJSON(root_raw)); // initialization and format check root = Root(type, Utils::parseJSON(root_raw), root); // signature verification against itself } catch (const std::exception& e) { - LOG_ERROR << "Loading initial root failed: " << e.what(); - return false; + LOG_ERROR << "Loading initial " << repo_type << " Root metadata failed: " << e.what(); + throw; } - return true; } -bool RepositoryCommon::verifyRoot(const std::string& root_raw) { +void RepositoryCommon::verifyRoot(const std::string& root_raw) { try { - int prev_version = root.version(); + int prev_version = rootVersion(); + // 
5.4.4.3.2.3. Version N+1 of the Root metadata file MUST have been signed + // by the following: (1) a threshold of keys specified in the latest Root + // metadata file (version N), and (2) a threshold of keys specified in the + // new Root metadata file being validated (version N+1). root = Root(type, Utils::parseJSON(root_raw), root); // double signature verification + // 5.4.4.3.2.4. The version number of the latest Root metadata file (version + // N) must be less than or equal to the version number of the new Root + // metadata file (version N+1). NOTE: we do not accept an equal version + // number. It must increment. if (root.version() != prev_version + 1) { - LOG_ERROR << "Version in root metadata doesn't match the expected value"; - return false; + LOG_ERROR << "Version " << root.version() << " in Root metadata doesn't match the expected value " + << prev_version + 1; + throw Uptane::RootRotationError(type.ToString()); } } catch (const std::exception& e) { - LOG_ERROR << "Signature verification for root metadata failed: " << e.what(); - return false; + LOG_ERROR << "Signature verification for Root metadata failed: " << e.what(); + throw; } - return true; } void RepositoryCommon::resetRoot() { root = Root(Root::Policy::kAcceptAll); } -Json::Value Manifest::signManifest(const Json::Value& manifest_unsigned) const { - Json::Value manifest = keys_.signTuf(manifest_unsigned); - return manifest; +void RepositoryCommon::updateRoot(INvStorage& storage, const IMetadataFetcher& fetcher, + const RepositoryType repo_type) { + // 5.4.4.3.1. Load the previous Root metadata file. + { + std::string root_raw; + if (storage.loadLatestRoot(&root_raw, repo_type)) { + initRoot(repo_type, root_raw); + } else { + fetcher.fetchRole(&root_raw, kMaxRootSize, repo_type, Role::Root(), Version(1)); + initRoot(repo_type, root_raw); + storage.storeRoot(root_raw, repo_type, Version(1)); + } + } + + // 5.4.4.3.2. Update to the latest Root metadata file. 
+ for (int version = rootVersion() + 1; version < kMaxRotations; ++version) { + // 5.4.4.3.2.2. Try downloading a new version N+1 of the Root metadata file. + std::string root_raw; + try { + fetcher.fetchRole(&root_raw, kMaxRootSize, repo_type, Role::Root(), Version(version)); + } catch (const std::exception& e) { + break; + } + + verifyRoot(root_raw); + + // 5.4.4.3.2.5. Set the latest Root metadata file to the new Root metadata + // file. + storage.storeRoot(root_raw, repo_type, Version(version)); + storage.clearNonRootMeta(repo_type); + } + + // 5.4.4.3.3. Check that the current (or latest securely attested) time is + // lower than the expiration timestamp in the latest Root metadata file. + // (Checks for a freeze attack.) + if (rootExpired()) { + throw Uptane::ExpiredMetadata(repo_type.ToString(), Role::ROOT); + } } } // namespace Uptane diff --git a/src/libaktualizr/uptane/uptanerepository.h b/src/libaktualizr/uptane/uptanerepository.h index 0176e352c9..06628ac908 100644 --- a/src/libaktualizr/uptane/uptanerepository.h +++ b/src/libaktualizr/uptane/uptanerepository.h @@ -1,49 +1,37 @@ #ifndef UPTANE_REPOSITORY_H_ #define UPTANE_REPOSITORY_H_ -#include +#include // for int64_t +#include // for string +#include "libaktualizr/types.h" // for TimeStamp +#include "uptane/tuf.h" // for Root, RepositoryType -#include "json/json.h" - -#include "config/config.h" -#include "crypto/crypto.h" -#include "crypto/keymanager.h" -#include "logging/logging.h" -#include "storage/invstorage.h" +class INvStorage; namespace Uptane { - -class Manifest { - public: - Manifest(const Config &config_in, std::shared_ptr storage_in) - : storage_{std::move(storage_in)}, keys_(storage_, config_in.keymanagerConfig()) {} - - Json::Value signManifest(const Json::Value &manifest_unsigned) const; - - void setPrimaryEcuSerialHwId(const std::pair &serials) { - primary_ecu_serial = serials.first; - primary_hardware_id = serials.second; - } - - EcuSerial getPrimaryEcuSerial() const { return 
primary_ecu_serial; } - - private: - Uptane::EcuSerial primary_ecu_serial{Uptane::EcuSerial::Unknown()}; - Uptane::HardwareIdentifier primary_hardware_id{Uptane::HardwareIdentifier::Unknown()}; - std::shared_ptr storage_; - KeyManager keys_; -}; +class IMetadataFetcher; class RepositoryCommon { public: + // NOLINTNEXTLINE(google-explicit-constructor, hicpp-explicit-conversions) RepositoryCommon(RepositoryType type_in) : type{type_in} {} - bool initRoot(const std::string &root_raw); - bool verifyRoot(const std::string &root_raw); - int rootVersion() { return root.version(); } - bool rootExpired() { return root.isExpired(TimeStamp::Now()); } + virtual ~RepositoryCommon() = default; + RepositoryCommon(const RepositoryCommon &guard) = default; + RepositoryCommon(RepositoryCommon &&) = default; + RepositoryCommon &operator=(const RepositoryCommon &guard) = default; + RepositoryCommon &operator=(RepositoryCommon &&) = default; + void initRoot(RepositoryType repo_type, const std::string &root_raw); + void verifyRoot(const std::string &root_raw); + int rootVersion() const { return root.version(); } + bool rootExpired() const { return root.isExpired(TimeStamp::Now()); } + virtual void updateMeta(INvStorage &storage, const IMetadataFetcher &fetcher) = 0; protected: void resetRoot(); + void updateRoot(INvStorage &storage, const IMetadataFetcher &fetcher, RepositoryType repo_type); + + static const int64_t kMaxRotations = 1000; + Root root; RepositoryType type; }; diff --git a/src/libaktualizr/utilities/CMakeLists.txt b/src/libaktualizr/utilities/CMakeLists.txt index dee3f068cb..8bdec8b309 100644 --- a/src/libaktualizr/utilities/CMakeLists.txt +++ b/src/libaktualizr/utilities/CMakeLists.txt @@ -1,8 +1,8 @@ set(SOURCES aktualizr_version.cc apiqueue.cc dequeue_buffer.cc + results.cc sig_handler.cc - sockaddr_io.cc timer.cc types.cc utils.cc) @@ -14,20 +14,20 @@ set(HEADERS apiqueue.h exceptions.h fault_injection.h sig_handler.h - sockaddr_io.h timer.h - types.h - utils.h) + 
utils.h + xml2json.h) set_property(SOURCE aktualizr_version.cc PROPERTY COMPILE_DEFINITIONS AKTUALIZR_VERSION="${AKTUALIZR_VERSION}") add_library(utilities OBJECT ${SOURCES}) -include(AddAktualizrTest) +add_aktualizr_test(NAME api_queue SOURCES api_queue_test.cc) add_aktualizr_test(NAME dequeue_buffer SOURCES dequeue_buffer_test.cc) add_aktualizr_test(NAME timer SOURCES timer_test.cc) add_aktualizr_test(NAME types SOURCES types_test.cc) add_aktualizr_test(NAME utils SOURCES utils_test.cc PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME sighandler SOURCES sighandler_test.cc) +add_aktualizr_test(NAME xml2json SOURCES xml2json_test.cc) aktualizr_source_file_checks(${SOURCES} ${HEADERS} ${TEST_SOURCES}) diff --git a/src/libaktualizr/utilities/api_queue_test.cc b/src/libaktualizr/utilities/api_queue_test.cc new file mode 100644 index 0000000000..c44e46c79c --- /dev/null +++ b/src/libaktualizr/utilities/api_queue_test.cc @@ -0,0 +1,54 @@ +#include + +#include +#include +#include "utilities/apiqueue.h" + +using std::cout; +using std::future; +using std::future_status; + +class CheckLifetime { + public: + CheckLifetime() { cout << "ctor\n"; } + ~CheckLifetime() { + valid = 999; + cout << "dtor\n"; + } + CheckLifetime(const CheckLifetime& other) { + (void)other; + cout << "copy-ctor\n"; + } + CheckLifetime& operator=(const CheckLifetime&) = delete; + CheckLifetime& operator=(CheckLifetime&&) = delete; + CheckLifetime(CheckLifetime&&) = delete; + + int valid{100}; +}; + +TEST(ApiQueue, Simple) { + api::CommandQueue dut; + future result; + { + CheckLifetime checkLifetime; + std::function task([checkLifetime] { + cout << "Running task..." 
<< checkLifetime.valid << "\n"; + return checkLifetime.valid; + }); + result = dut.enqueue(std::move(task)); + cout << "Leaving scope.."; + } + EXPECT_EQ(result.wait_for(std::chrono::milliseconds(100)), future_status::timeout); + + dut.run(); + // Include a timeout to avoid a failing test handing forever + ASSERT_EQ(result.wait_for(std::chrono::seconds(10)), future_status::ready); + EXPECT_EQ(result.get(), 100); +} + +#ifndef __NO_MAIN__ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/libaktualizr/utilities/apiqueue.cc b/src/libaktualizr/utilities/apiqueue.cc index 0b1b56cddc..6060446827 100644 --- a/src/libaktualizr/utilities/apiqueue.cc +++ b/src/libaktualizr/utilities/apiqueue.cc @@ -57,6 +57,7 @@ void CommandQueue::run() { std::lock_guard g(thread_m_); if (!thread_.joinable()) { thread_ = std::thread([this] { + Context ctx{.flow_control = &token_}; std::unique_lock lock(m_); for (;;) { cv_.wait(lock, [this] { return (!queue_.empty() && !paused_) || shutdown_; }); @@ -66,7 +67,7 @@ void CommandQueue::run() { auto task = std::move(queue_.front()); queue_.pop(); lock.unlock(); - task(); + task->PerformTask(&ctx); lock.lock(); } }); @@ -101,7 +102,7 @@ void CommandQueue::abort(bool restart_thread) { { // Flush the queue and reset to initial state std::lock_guard g(m_); - std::queue>().swap(queue_); + std::queue().swap(queue_); token_.reset(); shutdown_ = false; } @@ -110,4 +111,13 @@ void CommandQueue::abort(bool restart_thread) { run(); } } + +void CommandQueue::enqueue(ICommand::Ptr&& task) { + { + std::lock_guard lock(m_); + queue_.push(std::move(task)); + } + cv_.notify_all(); +} + } // namespace api diff --git a/src/libaktualizr/utilities/apiqueue.h b/src/libaktualizr/utilities/apiqueue.h index 30f51cdf87..cf4d79e113 100644 --- a/src/libaktualizr/utilities/apiqueue.h +++ b/src/libaktualizr/utilities/apiqueue.h @@ -54,37 +54,117 @@ class FlowControlToken { mutable 
std::condition_variable cv_; }; +struct Context { + api::FlowControlToken* flow_control; +}; + +class ICommand { + public: + using Ptr = std::shared_ptr; + ICommand() = default; + virtual ~ICommand() = default; + // Non-movable-non-copyable + ICommand(const ICommand&) = delete; + ICommand(ICommand&&) = delete; + ICommand& operator=(const ICommand&) = delete; + ICommand& operator=(ICommand&&) = delete; + + virtual void PerformTask(Context* ctx) = 0; +}; + +template +class CommandBase : public ICommand { + public: + void PerformTask(Context* ctx) override { + try { + result_.set_value(TaskImplementation(ctx)); + } catch (...) { + result_.set_exception(std::current_exception()); + } + } + + std::future GetFuture() { return result_.get_future(); } + + protected: + virtual T TaskImplementation(Context*) = 0; + + private: + std::promise result_; +}; + +template <> +class CommandBase : public ICommand { + public: + void PerformTask(Context* ctx) override { + try { + TaskImplementation(ctx); + result_.set_value(); + } catch (...) 
{ + result_.set_exception(std::current_exception()); + } + } + + std::future GetFuture() { return result_.get_future(); } + + protected: + virtual void TaskImplementation(Context*) = 0; + + private: + std::promise result_; +}; + +template +class Command : public CommandBase { + public: + explicit Command(std::function&& func) : f_{move(func)} {} + T TaskImplementation(Context* ctx) override { + (void)ctx; + return f_(); + } + + private: + std::function f_; +}; + +template +class CommandFlowControl : public CommandBase { + public: + explicit CommandFlowControl(std::function&& func) : f_{move(func)} {} + T TaskImplementation(Context* ctx) override { return f_(ctx->flow_control); } + + private: + std::function f_; +}; + class CommandQueue { public: + CommandQueue() = default; ~CommandQueue(); + // Non-copyable Non-movable + CommandQueue(const CommandQueue&) = delete; + CommandQueue(CommandQueue&&) = delete; + CommandQueue& operator=(const CommandQueue&) = delete; + CommandQueue& operator=(CommandQueue&&) = delete; void run(); bool pause(bool do_pause); // returns true iff pause→resume or resume→pause void abort(bool restart_thread = true); template - std::future enqueue(const std::function& f) { - std::packaged_task task(f); - auto r = task.get_future(); - { - std::lock_guard lock(m_); - queue_.push(std::packaged_task(std::move(task))); - } - cv_.notify_all(); - return r; + std::future enqueue(std::function&& function) { + auto task = std::make_shared>(std::move(function)); + enqueue(task); + return task->GetFuture(); } template - std::future enqueue(const std::function& f) { - std::packaged_task task(std::bind(f, &token_)); - auto r = task.get_future(); - { - std::lock_guard lock(m_); - queue_.push(std::packaged_task(std::move(task))); - } - cv_.notify_all(); - return r; + std::future enqueue(std::function&& function) { + auto task = std::make_shared>(std::move(function)); + enqueue(task); + return task->GetFuture(); } + void enqueue(ICommand::Ptr&& task); + private: 
std::atomic_bool shutdown_{false}; std::atomic_bool paused_{false}; @@ -92,7 +172,7 @@ class CommandQueue { std::thread thread_; std::mutex thread_m_; - std::queue> queue_; + std::queue queue_; std::mutex m_; std::condition_variable cv_; FlowControlToken token_; diff --git a/src/libaktualizr/utilities/config_utils.h b/src/libaktualizr/utilities/config_utils.h index cd7e0ec548..746cb36db7 100644 --- a/src/libaktualizr/utilities/config_utils.h +++ b/src/libaktualizr/utilities/config_utils.h @@ -5,8 +5,8 @@ #include +#include "libaktualizr/types.h" #include "logging/logging.h" -#include "types.h" #include "utils.h" /* @@ -62,6 +62,33 @@ inline void CopyFromConfig(T& dest, const std::string& option_name, const boost: } } +template <> +inline void CopyFromConfig(StorageType& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { + boost::optional value = pt.get_optional(option_name); + if (value.is_initialized()) { + std::string storage_type{StripQuotesFromStrings(value.get())}; + if (storage_type == "sqlite") { + dest = StorageType::kSqlite; + } else { + dest = StorageType::kFileSystem; + } + } +} + +template <> +inline void CopyFromConfig(BootedType& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { + boost::optional value = pt.get_optional(option_name); + if (value.is_initialized()) { + std::string storage_type{StripQuotesFromStrings(value.get())}; + // "0" is for backwards compatibility with aktualizr-lite usage. 
+ if (storage_type == "staged" || storage_type == "0") { + dest = BootedType::kStaged; + } else { + dest = BootedType::kBooted; + } + } +} + template <> inline void CopyFromConfig(KeyType& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { boost::optional value = pt.get_optional(option_name); @@ -95,10 +122,11 @@ inline void CopyFromConfig(CryptoSource& dest, const std::string& option_name, c } template <> -inline void CopyFromConfig(BasedPath& dest, const std::string& option_name, const boost::property_tree::ptree& pt) { +inline void CopyFromConfig(utils::BasedPath& dest, const std::string& option_name, + const boost::property_tree::ptree& pt) { boost::optional value = pt.get_optional(option_name); if (value.is_initialized()) { - BasedPath bp{StripQuotesFromStrings(value.get())}; + utils::BasedPath bp{StripQuotesFromStrings(value.get())}; dest = bp; } } @@ -122,49 +150,4 @@ inline void WriteSectionToStream(T& sec, const std::string& section_name, std::o os << "\n"; } -class BaseConfig { - public: - virtual ~BaseConfig() = default; - void updateFromToml(const boost::filesystem::path& filename) { - LOG_INFO << "Reading config: " << filename; - if (!boost::filesystem::exists(filename)) { - throw std::runtime_error("Config file " + filename.string() + " does not exist."); - } - boost::property_tree::ptree pt; - boost::property_tree::ini_parser::read_ini(filename.string(), pt); - updateFromPropertyTree(pt); - } - virtual void updateFromPropertyTree(const boost::property_tree::ptree& pt) = 0; - - protected: - void updateFromDirs(const std::vector& configs) { - std::map configs_map; - for (const auto& config : configs) { - if (!boost::filesystem::exists(config)) { - continue; - } - if (boost::filesystem::is_directory(config)) { - for (const auto& config_file : Utils::getDirEntriesByExt(config, ".toml")) { - configs_map[config_file.filename().string()] = config_file; - } - } else { - configs_map[config.filename().string()] = config; - } - } - 
for (const auto& config_file : configs_map) { - updateFromToml(config_file.second); - } - } - - void checkDirs(const std::vector& configs) { - for (const auto& config : configs) { - if (!boost::filesystem::exists(config)) { - throw std::runtime_error("Config directory " + config.string() + " does not exist."); - } - } - } - - std::vector config_dirs_ = {"/usr/lib/sota/conf.d", "/etc/sota/conf.d/"}; -}; - #endif // CONFIG_UTILS_H_ diff --git a/src/libaktualizr/utilities/dequeue_buffer.cc b/src/libaktualizr/utilities/dequeue_buffer.cc index 5d9acb2be0..29d54aa56a 100644 --- a/src/libaktualizr/utilities/dequeue_buffer.cc +++ b/src/libaktualizr/utilities/dequeue_buffer.cc @@ -3,12 +3,13 @@ #include #include #include + char* DequeueBuffer::Head() { assert(sentinel_ == kSentinel); return buffer_.data(); } -size_t DequeueBuffer::Size() { +size_t DequeueBuffer::Size() const { assert(sentinel_ == kSentinel); return written_bytes_; } @@ -23,8 +24,8 @@ void DequeueBuffer::Consume(size_t bytes) { throw std::logic_error("Attempt to DequeueBuffer::Consume() more bytes than are valid"); } // Shuffle up the buffer - auto next_unconsumed_byte = buffer_.begin() + bytes; - auto end_of_written_area = buffer_.begin() + written_bytes_; + auto* next_unconsumed_byte = buffer_.begin() + bytes; + auto* end_of_written_area = buffer_.begin() + written_bytes_; std::copy(next_unconsumed_byte, end_of_written_area, buffer_.begin()); written_bytes_ -= bytes; } @@ -49,4 +50,4 @@ void DequeueBuffer::HaveEnqueued(size_t bytes) { throw std::logic_error("Wrote bytes beyond the end of the buffer"); } written_bytes_ += bytes; -} \ No newline at end of file +} diff --git a/src/libaktualizr/utilities/dequeue_buffer.h b/src/libaktualizr/utilities/dequeue_buffer.h index e6fba78733..79d459ac1f 100644 --- a/src/libaktualizr/utilities/dequeue_buffer.h +++ b/src/libaktualizr/utilities/dequeue_buffer.h @@ -18,7 +18,7 @@ class DequeueBuffer { /** * The number of elements that are valid (have been written) after 
Head() */ - size_t Size(); + size_t Size() const; /** * Called after bytes have been read from Head(). Remove them from the head diff --git a/src/libaktualizr/utilities/exceptions.h b/src/libaktualizr/utilities/exceptions.h index 1a997a6a82..3a04432c62 100644 --- a/src/libaktualizr/utilities/exceptions.h +++ b/src/libaktualizr/utilities/exceptions.h @@ -8,13 +8,11 @@ class FatalException : public std::logic_error { public: explicit FatalException(const std::string &what_arg) : std::logic_error(what_arg.c_str()) { LOG_FATAL << what_arg; } - ~FatalException() noexcept override = default; }; class NotImplementedException : public std::logic_error { public: NotImplementedException() : std::logic_error("Function not yet implemented.") {} - ~NotImplementedException() noexcept override = default; }; #endif diff --git a/src/libaktualizr/utilities/fault_injection.h b/src/libaktualizr/utilities/fault_injection.h index 35d6de1d2f..a0b4074ab0 100644 --- a/src/libaktualizr/utilities/fault_injection.h +++ b/src/libaktualizr/utilities/fault_injection.h @@ -15,8 +15,10 @@ * http://blitiri.com.ar/p/libfiu. 
*/ -#ifndef _FAULT_INJECTION_H -#define _FAULT_INJECTION_H +#ifndef FAULT_INJECTION_H_ +#define FAULT_INJECTION_H_ + +#include /* Only define the stubs when fiu is disabled, otherwise use the real fiu.h * header */ @@ -30,16 +32,17 @@ #define fiu_return_on(name, retval) // Note: was `#define fault_injection_last_info() ""` but it triggers +// NOLINTNEXTLINE(clang-diagnostic-unused-function) static inline std::string fault_injection_last_info() { return ""; } #else -#include #include #include + +#include #include #include -#include #include #include @@ -48,23 +51,25 @@ static constexpr size_t fault_injection_info_bs = 256; static inline const char *fault_injection_info_fn() { static std::mutex mutex; - static char info_fn[128]; + static std::array info_fn{}; std::lock_guard lock(mutex); if (info_fn[0] != '\0') { - return info_fn; + return info_fn.data(); } - snprintf(info_fn, sizeof(info_fn), "/tmp/fiu-ctrl-info-%lu", static_cast(getpid())); + snprintf(info_fn.data(), info_fn.size(), "/tmp/fiu-ctrl-info-%lu", static_cast(getpid())); - return info_fn; + return info_fn.data(); } +// NOLINTNEXTLINE(clang-diagnostic-unused-function) static inline std::string fault_injection_last_info() { auto info_id = reinterpret_cast(fiu_failinfo()); std::array arr{}; + // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) std::streamoff offset = (info_id & 0xfffffff) * fault_injection_info_bs; std::ifstream f; f.exceptions(std::ifstream::failbit | std::ifstream::badbit); @@ -73,7 +78,7 @@ static inline std::string fault_injection_last_info() { // check high bit of info_id to see if it should look into FIU_INFO_FILE or // pid-dependent file name const char *fn = nullptr; - if ((info_id & (1lu << 31)) != 0) { + if ((info_id & (1LU << 31)) != 0) { fn = getenv("FIU_INFO_FILE"); } else { fn = fault_injection_info_fn(); @@ -90,6 +95,7 @@ static inline std::string fault_injection_last_info() { } // proxy for fiu_enable, with persisted failinfo (through a 
file) +// NOLINTNEXTLINE(clang-diagnostic-unused-function) static inline int fault_injection_enable(const char *name, int failnum, const std::string &failinfo, unsigned int flags) { std::array arr{}; @@ -97,7 +103,7 @@ static inline int fault_injection_enable(const char *name, int failnum, const st size_t failinfo_id = 0; - if (failinfo != "") { + if (!failinfo.empty()) { std::ofstream f; f.exceptions(std::ifstream::failbit | std::ifstream::badbit); @@ -110,11 +116,13 @@ static inline int fault_injection_enable(const char *name, int failnum, const st } } + // NOLINTNEXTLINE(performance-no-int-to-ptr) return fiu_enable(name, failnum, reinterpret_cast(failinfo_id), flags); } // proxy for fiu_init, but also explicitly clears the persisted failinfo, in // case it is lingering from a previous test case. +// NOLINTNEXTLINE(clang-diagnostic-unused-function) static inline void fault_injection_init() { fiu_init(0); std::ofstream f; @@ -128,4 +136,4 @@ static inline void fault_injection_init() { #endif /* FIU_ENABLE */ -#endif /* _FAULT_INJECTION_H */ +#endif /* FAULT_INJECTION_H_ */ diff --git a/src/libaktualizr/utilities/results.cc b/src/libaktualizr/utilities/results.cc new file mode 100644 index 0000000000..b6569d02ed --- /dev/null +++ b/src/libaktualizr/utilities/results.cc @@ -0,0 +1,48 @@ +#include "libaktualizr/results.h" + +namespace result { + +using result::DownloadStatus; +using result::UpdateStatus; + +std::ostream& operator<<(std::ostream& os, UpdateStatus update_status) { + switch (update_status) { + case UpdateStatus::kUpdatesAvailable: + os << "Updates Available"; + break; + case UpdateStatus::kNoUpdatesAvailable: + os << "No Updates Available"; + break; + case UpdateStatus::kError: + os << "Update Error"; + break; + default: + os << "Unknown UpdateStatus(" << static_cast(update_status) << ")"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const DownloadStatus stat) { + os << "\""; + switch (stat) { + case DownloadStatus::kSuccess: + os 
<< "Success"; + break; + case DownloadStatus::kPartialSuccess: + os << "Partial success"; + break; + case DownloadStatus::kNothingToDownload: + os << "Nothing to download"; + break; + case DownloadStatus::kError: + os << "Error"; + break; + default: + os << "unknown"; + break; + } + os << "\""; + return os; +} + +} // namespace result \ No newline at end of file diff --git a/src/libaktualizr/utilities/sig_handler.cc b/src/libaktualizr/utilities/sig_handler.cc index edcf3e093a..943e16ab18 100644 --- a/src/libaktualizr/utilities/sig_handler.cc +++ b/src/libaktualizr/utilities/sig_handler.cc @@ -1,10 +1,10 @@ #include "sig_handler.h" #include "logging/logging.h" -std::atomic SigHandler::signal_marker_; -std::mutex SigHandler::exit_m_; -std::condition_variable SigHandler::exit_cv_; -bool SigHandler::exit_flag_; +std::atomic_uint SigHandler::signal_marker_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +std::mutex SigHandler::exit_m_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +std::condition_variable SigHandler::exit_cv_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +bool SigHandler::exit_flag_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) SigHandler& SigHandler::get() { static SigHandler handler; @@ -12,14 +12,17 @@ SigHandler& SigHandler::get() { } SigHandler::~SigHandler() { - { - std::lock_guard g(exit_m_); - exit_flag_ = true; - } - exit_cv_.notify_all(); + try { + { + std::lock_guard g(exit_m_); + exit_flag_ = true; + } + exit_cv_.notify_all(); - if (polling_thread_.joinable()) { - polling_thread_.join(); + if (polling_thread_.joinable()) { + polling_thread_.join(); + } + } catch (...) 
{ } } @@ -31,9 +34,9 @@ void SigHandler::start(const std::function& on_signal) { polling_thread_ = boost::thread([on_signal]() { std::unique_lock l(exit_m_); while (true) { - bool got_signal = signal_marker_.exchange(false); + auto got_signal = signal_marker_.exchange(0); - if (got_signal) { + if (got_signal > 0) { on_signal(); return; } @@ -49,7 +52,7 @@ void SigHandler::signal(int sig) { ::signal(sig, signal_handler); } void SigHandler::signal_handler(int sig) { (void)sig; - bool v = false; + unsigned int v = 0; // put true if currently set to false - SigHandler::signal_marker_.compare_exchange_strong(v, true); + SigHandler::signal_marker_.compare_exchange_strong(v, 1); } diff --git a/src/libaktualizr/utilities/sig_handler.h b/src/libaktualizr/utilities/sig_handler.h index 3ccc5a38d9..de26aadcd7 100644 --- a/src/libaktualizr/utilities/sig_handler.h +++ b/src/libaktualizr/utilities/sig_handler.h @@ -15,7 +15,9 @@ class SigHandler { static SigHandler& get(); SigHandler(const SigHandler&) = delete; + SigHandler(SigHandler&&) = delete; SigHandler& operator=(const SigHandler&) = delete; + SigHandler& operator=(SigHandler&&) = delete; // set an handler for signals and start the handling thread void start(const std::function& on_signal); @@ -31,11 +33,11 @@ class SigHandler { static void signal_handler(int sig); boost::thread polling_thread_; - static std::atomic signal_marker_; + static std::atomic_uint signal_marker_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) - static std::mutex exit_m_; - static std::condition_variable exit_cv_; - static bool exit_flag_; + static std::mutex exit_m_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + static std::condition_variable exit_cv_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + static bool exit_flag_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) }; void signal_handler(int sig); diff --git a/src/libaktualizr/utilities/sockaddr_io.cc 
b/src/libaktualizr/utilities/sockaddr_io.cc deleted file mode 100644 index 7e7e582a07..0000000000 --- a/src/libaktualizr/utilities/sockaddr_io.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include "utilities/sockaddr_io.h" -#include "utilities/utils.h" - -std::ostream &operator<<(std::ostream &os, const sockaddr_storage &saddr) { - os << Utils::ipDisplayName(saddr) << ":" << Utils::ipPort(saddr); - return os; -} \ No newline at end of file diff --git a/src/libaktualizr/utilities/sockaddr_io.h b/src/libaktualizr/utilities/sockaddr_io.h deleted file mode 100644 index b1635ad9ec..0000000000 --- a/src/libaktualizr/utilities/sockaddr_io.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef UTILITIES_SOCKADDR_IO_H_ -#define UTILITIES_SOCKADDR_IO_H_ - -#include -#include - -std::ostream &operator<<(std::ostream &os, const sockaddr_storage &saddr); - -#endif // UTILITIES_SOCKADDR_IO_H_ \ No newline at end of file diff --git a/src/libaktualizr/utilities/timer.h b/src/libaktualizr/utilities/timer.h index 5f96dc7487..4ebf1bb34b 100644 --- a/src/libaktualizr/utilities/timer.h +++ b/src/libaktualizr/utilities/timer.h @@ -10,10 +10,13 @@ class Timer { public: Timer(); - Timer(const Timer&) = delete; - Timer& operator=(const Timer&) = delete; + ~Timer() = default; + Timer(const Timer &) = delete; + Timer(Timer &&) = delete; + Timer &operator=(const Timer &) = delete; + Timer &operator=(Timer &&) = delete; bool RunningMoreThan(double seconds) const; - friend std::ostream& operator<<(std::ostream& os, const Timer& /*timer*/); + friend std::ostream &operator<<(std::ostream &os, const Timer &timer); private: using Clock = std::chrono::steady_clock; diff --git a/src/libaktualizr/utilities/types.cc b/src/libaktualizr/utilities/types.cc index 1e2936f080..48c6225b14 100644 --- a/src/libaktualizr/utilities/types.cc +++ b/src/libaktualizr/utilities/types.cc @@ -1,16 +1,73 @@ -#include "utilities/types.h" - +#include +#include #include #include -TimeStamp TimeStamp::Now() { +#include "libaktualizr/types.h" 
+#include "utilities/utils.h" + +std::ostream &operator<<(std::ostream &os, const StorageType stype) { + std::string stype_str; + switch (stype) { + case StorageType::kFileSystem: + stype_str = "filesystem"; + break; + case StorageType::kSqlite: + stype_str = "sqlite"; + break; + default: + stype_str = "unknown"; + break; + } + os << '"' << stype_str << '"'; + return os; +} + +std::ostream &Uptane::operator<<(std::ostream &os, const HardwareIdentifier &hwid) { + os << hwid.ToString(); + return os; +} + +std::ostream &Uptane::operator<<(std::ostream &os, const EcuSerial &ecu_serial) { + os << ecu_serial.ToString(); + return os; +} + +std::ostream &operator<<(std::ostream &os, const BootedType btype) { + std::string btype_str; + switch (btype) { + case BootedType::kStaged: + btype_str = "staged"; + break; + default: + btype_str = "booted"; + break; + } + os << '"' << btype_str << '"'; + return os; +} + +std::ostream &operator<<(std::ostream &os, VerificationType vtype) { + const std::string type_s = Uptane::VerificationTypeToString(vtype); + os << '"' << type_s << '"'; + return os; +} + +std::string TimeToString(struct tm time) { + std::array formatted{}; + strftime(formatted.data(), 22, "%Y-%m-%dT%H:%M:%SZ", &time); + return std::string(formatted.data()); +} + +TimeStamp TimeStamp::Now() { return TimeStamp(CurrentTime()); } + +struct tm TimeStamp::CurrentTime() { time_t raw_time; struct tm time_struct {}; time(&raw_time); gmtime_r(&raw_time, &time_struct); - char formatted[22]; - strftime(formatted, 22, "%Y-%m-%dT%H:%M:%SZ", &time_struct); - return TimeStamp(formatted); + + return time_struct; } TimeStamp::TimeStamp(std::string rfc3339) { @@ -20,6 +77,8 @@ TimeStamp::TimeStamp(std::string rfc3339) { time_ = rfc3339; } +TimeStamp::TimeStamp(struct tm time) : TimeStamp(TimeToString(time)) {} + bool TimeStamp::IsValid() const { return time_.length() != 0; } bool TimeStamp::IsExpiredAt(const TimeStamp &now) const { @@ -42,27 +101,11 @@ std::ostream 
&operator<<(std::ostream &os, const TimeStamp &t) { } namespace data { -Json::Value Package::toJson() { - Json::Value json; - json["name"] = name; - json["version"] = version; - return json; -} - -Package Package::fromJson(const std::string &json_str) { - Json::Reader reader; - Json::Value json; - reader.parse(json_str, json); - Package package; - package.name = json["name"].asString(); - package.version = json["version"].asString(); - return package; -} const std::map data::ResultCode::string_repr{ {ResultCode::Numeric::kOk, "OK"}, {ResultCode::Numeric::kAlreadyProcessed, "ALREADY_PROCESSED"}, - {ResultCode::Numeric::kValidationFailed, "VALIDATION_FAILED"}, + {ResultCode::Numeric::kVerificationFailed, "VERIFICATION_FAILED"}, {ResultCode::Numeric::kInstallFailed, "INSTALL_FAILED"}, {ResultCode::Numeric::kDownloadFailed, "DOWNLOAD_FAILED"}, {ResultCode::Numeric::kInternalError, "INTERNAL_ERROR"}, @@ -73,7 +116,7 @@ const std::map data::ResultCode::string }; std::string data::ResultCode::toRepr() const { - std::string s = toString(); + std::string s = ToString(); if (s.find('\"') != std::string::npos) { throw std::runtime_error("Result code cannot contain double quotes"); @@ -109,7 +152,7 @@ ResultCode data::ResultCode::fromRepr(const std::string &repr) { Json::Value InstallationResult::toJson() const { Json::Value json; json["success"] = success; - json["code"] = result_code.toString(); + json["code"] = result_code.ToString(); json["description"] = description; return json; } @@ -121,4 +164,9 @@ std::ostream &operator<<(std::ostream &os, const ResultCode &result_code) { } // namespace data +boost::filesystem::path utils::BasedPath::get(const boost::filesystem::path &base) const { + // note: BasedPath(bp.get() == bp) + return Utils::absolutePath(base, p_); +} + // vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/libaktualizr/utilities/types.h b/src/libaktualizr/utilities/types.h deleted file mode 100644 index d57912bbea..0000000000 --- 
a/src/libaktualizr/utilities/types.h +++ /dev/null @@ -1,199 +0,0 @@ -#ifndef TYPES_H_ -#define TYPES_H_ -/** \file */ - -#include -#include - -#include -#include - -// Keep these int sync with AKIpUptaneKeyType ASN.1 definitions -enum class KeyType { - kED25519 = 0, - kFirstKnown = kED25519, - kRSA2048, - kRSA3072, - kRSA4096, - kLastKnown = kRSA4096, - kUnknown = 0xff -}; - -inline std::ostream& operator<<(std::ostream& os, const KeyType kt) { - std::string kt_str; - switch (kt) { - case KeyType::kRSA2048: - kt_str = "RSA2048"; - break; - case KeyType::kRSA3072: - kt_str = "RSA3072"; - break; - case KeyType::kRSA4096: - kt_str = "RSA4096"; - break; - case KeyType::kED25519: - kt_str = "ED25519"; - break; - default: - kt_str = "unknown"; - break; - } - os << '"' << kt_str << '"'; - return os; -} - -inline std::istream& operator>>(std::istream& is, KeyType& kt) { - std::string kt_str; - - is >> kt_str; - std::transform(kt_str.begin(), kt_str.end(), kt_str.begin(), ::toupper); - - if (kt_str == "\"RSA2048\"") { - kt = KeyType::kRSA2048; - } else if (kt_str == "\"RSA3072\"") { - kt = KeyType::kRSA3072; - } else if (kt_str == "\"RSA4096\"") { - kt = KeyType::kRSA4096; - } else if (kt_str == "\"ED25519\"") { - kt = KeyType::kED25519; - } else { - kt = KeyType::kUnknown; - } - return is; -} - -enum class CryptoSource { kFile = 0, kPkcs11, kAndroid }; - -inline std::ostream& operator<<(std::ostream& os, CryptoSource cs) { - std::string cs_str; - switch (cs) { - case CryptoSource::kFile: - cs_str = "file"; - break; - case CryptoSource::kPkcs11: - cs_str = "pkcs11"; - break; - default: - cs_str = "unknown"; - break; - } - os << '"' << cs_str << '"'; - return os; -} - -// timestamp, compatible with tuf -class TimeStamp { - public: - static TimeStamp Now(); - /** An invalid TimeStamp */ - TimeStamp() { ; } - explicit TimeStamp(std::string rfc3339); - bool IsExpiredAt(const TimeStamp& now) const; - bool IsValid() const; - std::string ToString() const { return time_; } - bool 
operator<(const TimeStamp& other) const; - bool operator>(const TimeStamp& other) const; - friend std::ostream& operator<<(std::ostream& os, const TimeStamp& t); - bool operator==(const TimeStamp& rhs) const { return time_ == rhs.time_; } - - class InvalidTimeStamp : public std::domain_error { - public: - InvalidTimeStamp() : std::domain_error("invalid timestamp") {} - ~InvalidTimeStamp() noexcept override = default; - }; - - private: - std::string time_; -}; - -std::ostream& operator<<(std::ostream& os, const TimeStamp& t); - -/// General data structures. -namespace data { - -using UpdateRequestId = std::string; -struct Package { - std::string name; - std::string version; - Json::Value toJson(); - static Package fromJson(const std::string& /*json_str*/); -}; - -struct ResultCode { - // These match the old enum representation - // A lot of them were unused and have been dropped - enum class Numeric { - kOk = 0, - /// Operation has already been processed - kAlreadyProcessed = 1, - /// Update image integrity has been compromised - kValidationFailed = 3, - /// Package installation failed - kInstallFailed = 4, - /// Package download failed - kDownloadFailed = 5, - /// SWM Internal integrity error - kInternalError = 18, - /// Other error - kGeneralError = 19, - // Install needs to be finalized (e.g: reboot) - kNeedCompletion = 21, - // Customer specific - kCustomError = 22, - // Unknown - kUnknown = -1, - }; - - // note: intentionally *not* explicit, to make the common case easier - ResultCode(ResultCode::Numeric in_num_code) : num_code(in_num_code) {} - ResultCode(ResultCode::Numeric in_num_code, std::string text_code_in) - : num_code(in_num_code), text_code(std::move(text_code_in)) {} - - bool operator==(const ResultCode& rhs) const { return num_code == rhs.num_code && toString() == rhs.toString(); } - bool operator!=(const ResultCode& rhs) const { return !(*this == rhs); } - friend std::ostream& operator<<(std::ostream& os, const ResultCode& result_code); - - Numeric 
num_code; - std::string text_code; - - // Allows to have a numeric code with a default representation, but also with - // any string representation - std::string toString() const { - if (text_code != "") { - return text_code; - } - - return std::string(string_repr.at(num_code)); - } - - // non-lossy reprensation for serialization - std::string toRepr() const; - static ResultCode fromRepr(const std::string& repr); - - private: - static const std::map string_repr; -}; - -std::ostream& operator<<(std::ostream& os, const ResultCode& result_code); - -struct InstallationResult { - InstallationResult() = default; - InstallationResult(ResultCode result_code_in, std::string description_in) - : success(result_code_in.num_code == ResultCode::Numeric::kOk), - result_code(std::move(result_code_in)), - description(std::move(description_in)) {} - InstallationResult(bool success_in, ResultCode result_code_in, std::string description_in) - : success(success_in), result_code(std::move(result_code_in)), description(std::move(description_in)) {} - - Json::Value toJson() const; - bool isSuccess() const { return success; }; - bool needCompletion() const { return result_code == ResultCode::Numeric::kNeedCompletion; } - - bool success{true}; - ResultCode result_code{ResultCode::Numeric::kOk}; - std::string description; -}; - -} // namespace data - -#endif diff --git a/src/libaktualizr/utilities/types_test.cc b/src/libaktualizr/utilities/types_test.cc index 0726e01c1c..6e9081c265 100644 --- a/src/libaktualizr/utilities/types_test.cc +++ b/src/libaktualizr/utilities/types_test.cc @@ -1,6 +1,6 @@ #include -#include "utilities/types.h" +#include "libaktualizr/types.h" TimeStamp now("2017-01-01T01:00:00Z"); @@ -34,7 +34,7 @@ TEST(Types, TimeStampNow) { TEST(Types, ResultCode) { data::ResultCode ok_res{data::ResultCode::Numeric::kOk}; EXPECT_EQ(ok_res.num_code, data::ResultCode::Numeric::kOk); - EXPECT_EQ(ok_res.toString(), "OK"); + EXPECT_EQ(ok_res.ToString(), "OK"); std::string repr = 
ok_res.toRepr(); EXPECT_EQ(repr, "\"OK\":0"); EXPECT_EQ(data::ResultCode::fromRepr(repr), ok_res); diff --git a/src/libaktualizr/utilities/utils.cc b/src/libaktualizr/utilities/utils.cc index 25abd0a484..d75da1f1f6 100644 --- a/src/libaktualizr/utilities/utils.cc +++ b/src/libaktualizr/utilities/utils.cc @@ -1,7 +1,8 @@ #include "utilities/utils.h" -#include #include +#include +#include #include #include #include @@ -33,7 +34,7 @@ #include "aktualizr_version.h" #include "logging/logging.h" -const char *adverbs[] = { +static const std::array adverbs = { "adorable", "acidic", "ample", "aromatic", "artistic", "attractive", "basic", "beautiful", "best", "blissful", "bubbly", "celebrated", "cheap", "chilly", "cloudy", "colorful", "colossal", "complete", "conventional", "costly", "creamy", "crisp", "dense", "double", @@ -52,147 +53,145 @@ const char *adverbs[] = { "uniform", "unusual", "valuable", "vast", "warm", "wavy", "wet", "whole", "wide", "wild", "wooden", "young"}; -const char *names[] = {"Allerlei", - "Apfelkuchen", - "Backerbsen", - "Baumkuchen", - "Beetenbartsch", - "Berliner", - "Bethmaennchen", - "Biersuppe", - "Birnenfladen", - "Bohnen", - "Bratapfel", - "Bratkartoffeln", - "Brezel", - "Broetchen", - "Butterkuchen", - "Currywurst", - "Dampfnudel", - "Dibbelabbes", - "Eierkuchen", - "Eintopf", - "Erbsensuppe", - "Flaedlesuppe", - "Flammkuchen", - "Fliederbeersuppe", - "Franzbroetchen", - "Funkenkuechlein", - "Gedadschde", - "Gemueseschnitzel", - "Germknoedel", - "Gerstensuppe", - "Griessbrei", - "Gruenkohl", - "Gruetze", - "Gummibaerchen", - "Gurkensalat", - "Habermus", - "Haddekuche", - "Hagebuttenmark", - "Handkaese", - "Herrencreme", - "Hoorische", - "Kaesekuchen", - "Kaiserschmarrn", - "Kartoffelpueree", - "Kartoffelpuffer", - "Kartoffelsalat", - "Kastanien", - "Kichererbsen", - "Kirschenmichel", - "Kirschtorte", - "Klaben", - "Kloesse", - "Kluntjes", - "Knaeckebrot", - "Kniekuechle", - "Knoedel", - "Kohlroulade", - "Krautfleckerl", - "Kuerbiskernbrot", - 
"Kuerbissuppe", - "Lebkuchen", - "Linsen", - "Loeffelerbsen", - "Magenbrot", - "Marillenknoedel", - "Maroni", - "Marsch", - "Marzipan", - "Maultaschen", - "Milliramstrudel", - "Mischbrot", - "Mohnnudeln", - "Mohnpielen", - "Mohnzelten", - "Muesli", - "Nussecke", - "Nusstorte", - "Palatschinke", - "Pellkartoffeln", - "Pfannkuchen", - "Pfefferkuchen", - "Pillekuchen", - "Pommes", - "Poschweck", - "Powidltascherl", - "Printen", - "Prinzregententorte", - "Pumpernickel", - "Punschkrapfen", - "Quarkkeulchen", - "Quetschkartoffeln", - "Raclette", - "Radi", - "Reibekuchen", - "Reinling", - "Riebel", - "Roeggelchen", - "Roesti", - "Sauerkraut", - "Schmalzkuchen", - "Schmorgurken", - "Schnippelbohnen", - "Schoeberl", - "Schrippe", - "Schupfnudel", - "Schuxen", - "Schwammerlsuppe", - "Schweineohren", - "Sonnenblumenkernbrot", - "Spaetzle", - "Spaghettieis", - "Spargel", - "Spekulatius", - "Springerle", - "Spritzkuchen", - "Stampfkartoffeln", - "Sterz", - "Stollen", - "Streuselkuchen", - "Tilsit", - "Toastbrot", - "Topfenstrudel", - "Vollkornbrot", - "Wibele", - "Wickelkloesse", - "Zimtwaffeln", - "Zwetschkenroester", - "Zwiebelkuchen"}; - -typedef boost::archive::iterators::base64_from_binary< - boost::archive::iterators::transform_width > - base64_text; - -typedef boost::archive::iterators::transform_width< +static const std::array names = {"Allerlei", + "Apfelkuchen", + "Backerbsen", + "Baumkuchen", + "Beetenbartsch", + "Berliner", + "Bethmaennchen", + "Biersuppe", + "Birnenfladen", + "Bohnen", + "Bratapfel", + "Bratkartoffeln", + "Brezel", + "Broetchen", + "Butterkuchen", + "Currywurst", + "Dampfnudel", + "Dibbelabbes", + "Eierkuchen", + "Eintopf", + "Erbsensuppe", + "Flaedlesuppe", + "Flammkuchen", + "Fliederbeersuppe", + "Franzbroetchen", + "Funkenkuechlein", + "Gedadschde", + "Gemueseschnitzel", + "Germknoedel", + "Gerstensuppe", + "Griessbrei", + "Gruenkohl", + "Gruetze", + "Gummibaerchen", + "Gurkensalat", + "Habermus", + "Haddekuche", + "Hagebuttenmark", + 
"Handkaese", + "Herrencreme", + "Hoorische", + "Kaesekuchen", + "Kaiserschmarrn", + "Kartoffelpueree", + "Kartoffelpuffer", + "Kartoffelsalat", + "Kastanien", + "Kichererbsen", + "Kirschenmichel", + "Kirschtorte", + "Klaben", + "Kloesse", + "Kluntjes", + "Knaeckebrot", + "Kniekuechle", + "Knoedel", + "Kohlroulade", + "Krautfleckerl", + "Kuerbiskernbrot", + "Kuerbissuppe", + "Lebkuchen", + "Linsen", + "Loeffelerbsen", + "Magenbrot", + "Marillenknoedel", + "Maroni", + "Marsch", + "Marzipan", + "Maultaschen", + "Milliramstrudel", + "Mischbrot", + "Mohnnudeln", + "Mohnpielen", + "Mohnzelten", + "Muesli", + "Nussecke", + "Nusstorte", + "Palatschinke", + "Pellkartoffeln", + "Pfannkuchen", + "Pfefferkuchen", + "Pillekuchen", + "Pommes", + "Poschweck", + "Powidltascherl", + "Printen", + "Prinzregententorte", + "Pumpernickel", + "Punschkrapfen", + "Quarkkeulchen", + "Quetschkartoffeln", + "Raclette", + "Radi", + "Reibekuchen", + "Reinling", + "Riebel", + "Roeggelchen", + "Roesti", + "Sauerkraut", + "Schmalzkuchen", + "Schmorgurken", + "Schnippelbohnen", + "Schoeberl", + "Schrippe", + "Schupfnudel", + "Schuxen", + "Schwammerlsuppe", + "Schweineohren", + "Sonnenblumenkernbrot", + "Spaetzle", + "Spaghettieis", + "Spargel", + "Spekulatius", + "Springerle", + "Spritzkuchen", + "Stampfkartoffeln", + "Sterz", + "Stollen", + "Streuselkuchen", + "Tilsit", + "Toastbrot", + "Topfenstrudel", + "Vollkornbrot", + "Wibele", + "Wickelkloesse", + "Zimtwaffeln", + "Zwetschkenroester", + "Zwiebelkuchen"}; + +using base64_text = boost::archive::iterators::base64_from_binary< + boost::archive::iterators::transform_width >; + +using base64_to_bin = boost::archive::iterators::transform_width< boost::archive::iterators::binary_from_base64< boost::archive::iterators::remove_whitespace >, - 8, 6> - base64_to_bin; + 8, 6>; std::string Utils::fromBase64(std::string base64_string) { - int64_t paddingChars = std::count(base64_string.begin(), base64_string.end(), '='); + std::ptrdiff_t paddingChars = 
std::count(base64_string.begin(), base64_string.end(), '='); std::replace(base64_string.begin(), base64_string.end(), '=', 'A'); std::string result(base64_to_bin(base64_string.begin()), base64_to_bin(base64_string.end())); result.erase(result.end() - paddingChars, result.end()); @@ -243,9 +242,9 @@ std::string Utils::extractField(const std::string &in, unsigned int field_id) { } Json::Value Utils::parseJSON(const std::string &json_str) { - Json::Reader reader; + std::istringstream strs(json_str); Json::Value json_value; - reader.parse(json_str, json_value); + parseFromStream(Json::CharReaderBuilder(), strs, &json_value, nullptr); return json_value; } @@ -258,13 +257,13 @@ Json::Value Utils::parseJSONFile(const boost::filesystem::path &filename) { std::string Utils::genPrettyName() { std::random_device urandom; - std::uniform_int_distribution<> adverbs_dist(0, (sizeof(adverbs) / sizeof(char *)) - 1); - std::uniform_int_distribution<> nouns_dist(0, (sizeof(names) / sizeof(char *)) - 1); - std::uniform_int_distribution<> digits(0, 999); + std::uniform_int_distribution adverbs_dist(0, adverbs.size() - 1); + std::uniform_int_distribution nouns_dist(0, names.size() - 1); + std::uniform_int_distribution digits(0, 999); std::stringstream pretty_name; - pretty_name << adverbs[adverbs_dist(urandom)]; + pretty_name << adverbs.at(adverbs_dist(urandom)); pretty_name << "-"; - pretty_name << names[nouns_dist(urandom)]; + pretty_name << names.at(nouns_dist(urandom)); pretty_name << "-"; pretty_name << digits(urandom); std::string res = pretty_name.str(); @@ -275,7 +274,7 @@ std::string Utils::genPrettyName() { std::string Utils::readFile(const boost::filesystem::path &filename, const bool trim) { boost::filesystem::path tmpFilename = filename; tmpFilename += ".new"; - + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) if (boost::filesystem::exists(tmpFilename)) { LOG_WARNING << tmpFilename << " was found on FS, removing"; boost::filesystem::remove(tmpFilename); @@ 
-292,13 +291,14 @@ std::string Utils::readFile(const boost::filesystem::path &filename, const bool static constexpr size_t BSIZE = 20 * 512; struct archive_state { - archive_state(std::istream &is_in) : is(is_in) {} + public: + explicit archive_state(std::istream &is_in) : is(is_in) {} std::istream &is; std::array buf{}; }; static ssize_t read_cb(struct archive *a, void *client_data, const void **buffer) { - auto s = reinterpret_cast(client_data); + auto *s = reinterpret_cast(client_data); if (s->is.fail()) { archive_set_error(a, -1, "unable to read from stream"); return 0; @@ -349,7 +349,14 @@ std::string Utils::jsonToStr(const Json::Value &json) { return ss.str(); } -std::string Utils::jsonToCanonicalStr(const Json::Value &json) { return Json::FastWriter().write(json); } +std::string Utils::jsonToCanonicalStr(const Json::Value &json) { + static Json::StreamWriterBuilder wbuilder = []() { + Json::StreamWriterBuilder w; + wbuilder["indentation"] = ""; + return w; + }(); + return Json::writeString(wbuilder, json); +} Json::Value Utils::getHardwareInfo() { std::string result; @@ -388,7 +395,7 @@ Json::Value Utils::getNetworkInfo() { } } - if (itf.name != "") { + if (!itf.name.empty()) { { // get ip address StructGuard ifaddrs(nullptr, freeifaddrs); @@ -434,11 +441,11 @@ Json::Value Utils::getNetworkInfo() { } std::string Utils::getHostname() { - char hostname[200]; - if (gethostname(hostname, 200) < 0) { + std::array hostname{}; + if (gethostname(hostname.data(), hostname.size()) < 0) { return ""; } - return hostname; + return std::string(hostname.data()); } std::string Utils::randomUuid() { @@ -447,7 +454,8 @@ std::string Utils::randomUuid() { return boost::uuids::to_string(uuid_gen()); } -// FIXME Doesn't work with broken symlinks +// Note that this doesn't work with broken symlinks. 
+// NOLINTNEXTLINE(misc-no-recursion) void Utils::copyDir(const boost::filesystem::path &from, const boost::filesystem::path &to) { boost::filesystem::remove_all(to); @@ -464,27 +472,26 @@ void Utils::copyDir(const boost::filesystem::path &from, const boost::filesystem } std::string Utils::readFileFromArchive(std::istream &as, const std::string &filename, const bool trim) { - struct archive *a = archive_read_new(); + StructGuardInt a(archive_read_new(), archive_read_free); if (a == nullptr) { LOG_ERROR << "archive error: could not initialize archive object"; throw std::runtime_error("archive error"); } - archive_read_support_filter_all(a); - archive_read_support_format_all(a); + archive_read_support_filter_all(a.get()); + archive_read_support_format_all(a.get()); auto state = std_::make_unique(std::ref(as)); - int r = archive_read_open(a, reinterpret_cast(state.get()), nullptr, read_cb, nullptr); + int r = archive_read_open(a.get(), reinterpret_cast(state.get()), nullptr, read_cb, nullptr); if (r != ARCHIVE_OK) { - LOG_ERROR << "archive error: " << archive_error_string(a); - archive_read_free(a); + LOG_ERROR << "archive error: " << archive_error_string(a.get()); throw std::runtime_error("archive error"); } bool found = false; std::stringstream out_stream; struct archive_entry *entry; - while (archive_read_next_header(a, &entry) == ARCHIVE_OK) { + while (archive_read_next_header(a.get(), &entry) == ARCHIVE_OK) { if (filename != archive_entry_pathname(entry)) { - archive_read_data_skip(a); + archive_read_data_skip(a.get()); continue; } @@ -493,13 +500,12 @@ std::string Utils::readFileFromArchive(std::istream &as, const std::string &file int64_t offset; for (;;) { - r = archive_read_data_block(a, reinterpret_cast(&buff), &size, &offset); + r = archive_read_data_block(a.get(), reinterpret_cast(&buff), &size, &offset); if (r == ARCHIVE_EOF) { found = true; break; - } - if (r != ARCHIVE_OK) { - LOG_ERROR << "archive error: " << archive_error_string(a); + } else if (r != 
ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a.get()); break; } if (size > 0 && buff != nullptr) { @@ -508,9 +514,9 @@ std::string Utils::readFileFromArchive(std::istream &as, const std::string &file } } - r = archive_read_free(a); + r = archive_read_close(a.get()); if (r != ARCHIVE_OK) { - LOG_ERROR << "archive error: " << archive_error_string(a); + LOG_ERROR << "archive error: " << archive_error_string(a.get()); } if (!found) { @@ -525,7 +531,7 @@ std::string Utils::readFileFromArchive(std::istream &as, const std::string &file } static ssize_t write_cb(struct archive *a, void *client_data, const void *buffer, size_t length) { - auto s = reinterpret_cast(client_data); + auto *s = reinterpret_cast(client_data); s->write(reinterpret_cast(buffer), static_cast(length)); if (s->fail()) { archive_set_error(a, -1, "unable to write in stream"); @@ -536,45 +542,140 @@ static ssize_t write_cb(struct archive *a, void *client_data, const void *buffer } void Utils::writeArchive(const std::map &entries, std::ostream &as) { - struct archive *a = archive_write_new(); + StructGuardInt a(archive_write_new(), archive_write_free); if (a == nullptr) { LOG_ERROR << "archive error: could not initialize archive object"; throw std::runtime_error("archive error"); } - archive_write_set_format_pax(a); - archive_write_add_filter_gzip(a); + archive_write_set_format_pax(a.get()); + archive_write_add_filter_gzip(a.get()); - int r = archive_write_open(a, reinterpret_cast(&as), nullptr, write_cb, nullptr); + int r = archive_write_open(a.get(), reinterpret_cast(&as), nullptr, write_cb, nullptr); if (r != ARCHIVE_OK) { - LOG_ERROR << "archive error: " << archive_error_string(a); - archive_write_free(a); + LOG_ERROR << "archive error: " << archive_error_string(a.get()); throw std::runtime_error("archive error"); } - struct archive_entry *entry = archive_entry_new(); + StructGuard entry(archive_entry_new(), archive_entry_free); for (const auto &el : entries) { - 
archive_entry_clear(entry); - archive_entry_set_filetype(entry, AE_IFREG); - archive_entry_set_perm(entry, S_IRWXU | S_IRWXG | S_IRWXO); - archive_entry_set_size(entry, static_cast(el.second.size())); - archive_entry_set_pathname(entry, el.first.c_str()); - if (archive_write_header(a, entry) != 0) { - LOG_ERROR << "archive error: " << archive_error_string(a); - archive_entry_free(entry); - archive_write_free(a); + archive_entry_clear(entry.get()); + archive_entry_set_filetype(entry.get(), AE_IFREG); + archive_entry_set_perm(entry.get(), S_IRWXU | S_IRWXG | S_IRWXO); + archive_entry_set_size(entry.get(), static_cast(el.second.size())); + archive_entry_set_pathname(entry.get(), el.first.c_str()); + if (archive_write_header(a.get(), entry.get()) != 0) { + LOG_ERROR << "archive error: " << archive_error_string(a.get()); throw std::runtime_error("archive error"); } - if (archive_write_data(a, el.second.c_str(), el.second.size()) < 0) { - LOG_ERROR << "archive error: " << archive_error_string(a); - archive_entry_free(entry); - archive_write_free(a); + if (archive_write_data(a.get(), el.second.c_str(), el.second.size()) < 0) { + LOG_ERROR << "archive error: " << archive_error_string(a.get()); throw std::runtime_error("archive error"); } } - archive_entry_free(entry); - r = archive_write_free(a); + r = archive_write_close(a.get()); if (r != ARCHIVE_OK) { - LOG_ERROR << "archive error: " << archive_error_string(a); + LOG_ERROR << "archive error: " << archive_error_string(a.get()); + } +} + +/* Removing a file from an archive isn't possible in the obvious sense. The only + * way to do so in practice is to create a new archive, copy everything you + * _don't_ want to remove, and then replace the old archive with the new one. 
+ */ +void Utils::removeFileFromArchive(const boost::filesystem::path &archive_path, const std::string &filename) { + std::ifstream as_in(archive_path.c_str(), std::ios::in | std::ios::binary); + if (as_in.fail()) { + LOG_ERROR << "Unable to open provided provisioning archive " << archive_path << ": " << std::strerror(errno); + throw std::runtime_error("Unable to parse provisioning credentials"); + } + const boost::filesystem::path outfile = archive_path.string() + "-" + boost::filesystem::unique_path().string(); + std::ofstream as_out(outfile.c_str(), std::ios::out | std::ios::binary); + if (as_out.fail()) { + LOG_ERROR << "Unable to create file " << outfile << ": " << std::strerror(errno); + throw std::runtime_error("Unable to parse provisioning credentials"); + } + + StructGuardInt a_in(archive_read_new(), archive_read_free); + if (a_in == nullptr) { + LOG_ERROR << "archive error: could not initialize archive object"; + throw std::runtime_error("archive error"); + } + archive_read_support_filter_all(a_in.get()); + archive_read_support_format_all(a_in.get()); + auto state = std_::make_unique(std::ref(as_in)); + int r = archive_read_open(a_in.get(), reinterpret_cast(state.get()), nullptr, read_cb, nullptr); + if (r != ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a_in.get()); + throw std::runtime_error("archive error"); + } + + StructGuardInt a_out(archive_write_new(), archive_write_free); + if (a_out == nullptr) { + LOG_ERROR << "archive error: could not initialize archive object"; + throw std::runtime_error("archive error"); + } + archive_write_set_format_zip(a_out.get()); + r = archive_write_open(a_out.get(), reinterpret_cast(&as_out), nullptr, write_cb, nullptr); + if (r != ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a_out.get()); + throw std::runtime_error("archive error"); + } + + bool found = false; + struct archive_entry *entry_in; + while (archive_read_next_header(a_in.get(), &entry_in) == ARCHIVE_OK) { 
+ const char *entry_name = archive_entry_pathname(entry_in); + if (filename == entry_name) { + archive_read_data_skip(a_in.get()); + found = true; + continue; + } + + StructGuard entry_out(archive_entry_new(), archive_entry_free); + const struct stat *entry_stat = archive_entry_stat(entry_in); + archive_entry_copy_stat(entry_out.get(), entry_stat); + archive_entry_set_pathname(entry_out.get(), entry_name); + if (archive_write_header(a_out.get(), entry_out.get()) != 0) { + LOG_ERROR << "archive error: " << archive_error_string(a_out.get()); + throw std::runtime_error("archive error"); + } + + const char *buff; + size_t size; + int64_t offset; + + for (;;) { + r = archive_read_data_block(a_in.get(), reinterpret_cast(&buff), &size, &offset); + if (r == ARCHIVE_EOF) { + break; + } else if (r != ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a_in.get()); + break; + } + if (size > 0 && buff != nullptr) { + if (archive_write_data(a_out.get(), buff, size) < 0) { + LOG_ERROR << "archive error: " << archive_error_string(a_out.get()); + throw std::runtime_error("archive error"); + } + } + } + } + + r = archive_read_close(a_in.get()); + if (r != ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a_in.get()); + } + + r = archive_write_close(a_out.get()); + if (r != ARCHIVE_OK) { + LOG_ERROR << "archive error: " << archive_error_string(a_out.get()); + } + + if (found) { + boost::filesystem::rename(outfile, archive_path); + } else { + boost::filesystem::remove(outfile); + throw std::runtime_error("Requested file not found in archive!"); } } @@ -589,18 +690,18 @@ sockaddr_storage Utils::ipGetSockaddr(int fd) { } std::string Utils::ipDisplayName(const sockaddr_storage &saddr) { - char ipstr[INET6_ADDRSTRLEN]; + std::array ipstr{}; switch (saddr.ss_family) { case AF_INET: { const auto *sa = reinterpret_cast(&saddr); - inet_ntop(AF_INET, &sa->sin_addr, ipstr, sizeof(ipstr)); - return std::string(ipstr); + inet_ntop(AF_INET, &sa->sin_addr, 
ipstr.data(), ipstr.size()); + return std::string(ipstr.data()); } case AF_INET6: { const auto *sa = reinterpret_cast(&saddr); - inet_ntop(AF_INET6, &sa->sin6_addr, ipstr, sizeof(ipstr)); - return std::string(ipstr); + inet_ntop(AF_INET6, &sa->sin6_addr, ipstr.data(), ipstr.size()); + return std::string(ipstr.data()); } default: return "unknown"; @@ -619,11 +720,11 @@ int Utils::ipPort(const sockaddr_storage &saddr) { return -1; } - return ntohs(p); + return ntohs(p); // NOLINT(readability-isolate-declaration) } int Utils::shell(const std::string &command, std::string *output, bool include_stderr) { - char buffer[128]; + std::array buffer{}; std::string full_command(command); if (include_stderr) { full_command += " 2>&1"; @@ -634,8 +735,8 @@ int Utils::shell(const std::string &command, std::string *output, bool include_s return -1; } while (feof(pipe) == 0) { - if (fgets(buffer, 128, pipe) != nullptr) { - *output += buffer; + if (fgets(buffer.data(), buffer.size(), pipe) != nullptr) { + *output += buffer.data(); } } int exitcode = pclose(pipe); @@ -654,9 +755,10 @@ boost::filesystem::path Utils::absolutePath(const boost::filesystem::path &root, std::vector Utils::getDirEntriesByExt(const boost::filesystem::path &dir_path, const std::string &ext) { std::vector entries; - boost::filesystem::directory_iterator entryItEnd, entryIt(dir_path); + boost::filesystem::directory_iterator entryItEnd; + boost::filesystem::directory_iterator entryIt(dir_path); for (; entryIt != entryItEnd; ++entryIt) { - auto &entry_path = entryIt->path(); + const auto &entry_path = entryIt->path(); if (!boost::filesystem::is_directory(*entryIt) && entry_path.extension().string() == ext) { entries.push_back(entry_path); } @@ -665,6 +767,7 @@ std::vector Utils::getDirEntriesByExt(const boost::file return entries; } +// NOLINTNEXTLINE(misc-no-recursion) void Utils::createDirectories(const boost::filesystem::path &path, mode_t mode) { boost::filesystem::path parent = path.parent_path(); if 
(!parent.empty() && !boost::filesystem::exists(parent)) { @@ -711,7 +814,7 @@ std::string Utils::urlEncode(const std::string &input) { return res; } -CURL *Utils::curlDupHandleWrapper(CURL *const curl_in, const bool using_pkcs11) { +CURL *Utils::curlDupHandleWrapper(CURL *const curl_in, const bool using_pkcs11, CURLSH *share) { CURL *curl = curl_easy_duphandle(curl_in); // This is a workaround for a bug in curl. It has been fixed in @@ -721,13 +824,18 @@ CURL *Utils::curlDupHandleWrapper(CURL *const curl_in, const bool using_pkcs11) if (using_pkcs11) { curlEasySetoptWrapper(curl, CURLOPT_SSLENGINE, "pkcs11"); } + if (share != nullptr) { + curl_easy_setopt(curl, CURLOPT_SHARE, share); + } return curl; } class SafeTempRoot { public: SafeTempRoot(const SafeTempRoot &) = delete; + SafeTempRoot(SafeTempRoot &&) = delete; SafeTempRoot operator=(const SafeTempRoot &) = delete; + SafeTempRoot operator=(SafeTempRoot &&) = delete; // provide this as a static method so that we can use C++ static destructor // to remove the temp root static boost::filesystem::path &Get() { @@ -744,7 +852,7 @@ class SafeTempRoot { } boost::filesystem::path p = prefix / boost::filesystem::unique_path("aktualizr-%%%%-%%%%-%%%%-%%%%"); if (mkdir(p.c_str(), S_IRWXU) == -1) { - throw std::runtime_error(std::string("could not create temporary directory root: ").append(p.native())); + throw std::runtime_error(std::string("Could not create temporary directory root: ").append(p.native())); } path = boost::filesystem::path(p); @@ -760,7 +868,7 @@ class SafeTempRoot { boost::filesystem::path path; }; -std::string Utils::storage_root_path_; +std::string Utils::storage_root_path_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) void Utils::setStorageRootPath(const std::string &storage_root_path) { storage_root_path_ = storage_root_path; } @@ -775,12 +883,13 @@ const char *Utils::getUserAgent() { return user_agent_.c_str(); } -std::string Utils::user_agent_; +std::string Utils::user_agent_; // 
NOLINT(cppcoreguidelines-avoid-non-const-global-variables) void Utils::setCaPath(boost::filesystem::path path) { ca_path_ = std::move(path); } const char *Utils::getCaPath() { return ca_path_.c_str(); } +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) boost::filesystem::path Utils::ca_path_{"/etc/ssl/certs"}; TemporaryFile::TemporaryFile(const std::string &hint) @@ -788,7 +897,7 @@ TemporaryFile::TemporaryFile(const std::string &hint) TemporaryFile::~TemporaryFile() { boost::filesystem::remove(tmp_name_); } -void TemporaryFile::PutContents(const std::string &contents) { +void TemporaryFile::PutContents(const std::string &contents) const { mode_t mode = S_IRUSR | S_IWUSR; int fd = open(Path().c_str(), O_WRONLY | O_CREAT | O_TRUNC, mode); if (fd < 0) { @@ -821,59 +930,67 @@ boost::filesystem::path TemporaryDirectory::operator/(const boost::filesystem::p std::string TemporaryDirectory::PathString() const { return Path().string(); } -void Utils::setSocketPort(sockaddr_storage *addr, in_port_t port) { - if (addr->ss_family == AF_INET) { - reinterpret_cast(addr)->sin_port = port; - } else if (addr->ss_family == AF_INET6) { - reinterpret_cast(addr)->sin6_port = port; +Socket::Socket() { + socket_fd_ = socket(AF_INET, SOCK_STREAM, 0); + if (-1 == socket_fd_) { + throw std::system_error(errno, std::system_category(), "socket"); } } -bool operator<(const sockaddr_storage &left, const sockaddr_storage &right) { - if (left.ss_family == AF_INET) { - throw std::runtime_error("IPv4 addresses are not supported"); - } - const unsigned char *left_addr = reinterpret_cast(&left)->sin6_addr.s6_addr; // NOLINT - const unsigned char *right_addr = reinterpret_cast(&right)->sin6_addr.s6_addr; // NOLINT - int res = memcmp(left_addr, right_addr, 16); +Socket::~Socket() { ::close(socket_fd_); } - return (res < 0); +std::string Socket::ToString() const { + auto saddr = Utils::ipGetSockaddr(socket_fd_); + return Utils::ipDisplayName(saddr) + ":" + 
std::to_string(Utils::ipPort(saddr)); } -Socket::Socket(const std::string &ip, uint16_t port) : sock_address_{} { - memset(&sock_address_, 0, sizeof(sock_address_)); - sock_address_.sin_family = AF_INET; - inet_pton(AF_INET, ip.c_str(), &(sock_address_.sin_addr)); - sock_address_.sin_port = htons(port); +void Socket::bind(in_port_t port, bool reuse) const { + sockaddr_in sa{}; + memset(&sa, 0, sizeof(sa)); + sa.sin_family = AF_INET; + sa.sin_port = htons(port); // NOLINT(readability-isolate-declaration) + sa.sin_addr.s_addr = htonl(INADDR_ANY); // NOLINT(readability-isolate-declaration) - socket_fd_ = socket(AF_INET, SOCK_STREAM, 0); - if (socket_fd_ < 0) { + int reuseaddr = reuse ? 1 : 0; + if (-1 == setsockopt(socket_fd_, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr))) { throw std::system_error(errno, std::system_category(), "socket"); } -} -Socket::~Socket() { - shutdown(socket_fd_, SHUT_RDWR); - ::close(socket_fd_); + if (-1 == ::bind(socket_fd_, reinterpret_cast(&sa), sizeof(sa))) { + throw std::system_error(errno, std::system_category(), "socket"); + } } -int Socket::bind(in_port_t port, bool reuse) { - sockaddr_in sa{}; - memset(&sa, 0, sizeof(sa)); - sa.sin_family = AF_INET; - sa.sin_port = htons(port); - sa.sin_addr.s_addr = htonl(INADDR_ANY); +ListenSocket::ListenSocket(in_port_t port) : _port(port) { + bind(port); + if (_port == 0) { + // ephemeral port was bound, find out its real port number + auto ephemeral_port = Utils::ipPort(Utils::ipGetSockaddr(socket_fd_)); + if (-1 != ephemeral_port) { + _port = static_cast(ephemeral_port); + } + } +} - int reuseaddr = reuse ? 
1 : 0; - if (setsockopt(socket_fd_, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr)) < 0) { - return errno; +ConnectionSocket::ConnectionSocket(const std::string &ip, in_port_t port, in_port_t bind_port) + : remote_sock_address_{} { + memset(&remote_sock_address_, 0, sizeof(remote_sock_address_)); + remote_sock_address_.sin_family = AF_INET; + if (-1 == inet_pton(AF_INET, ip.c_str(), &(remote_sock_address_.sin_addr))) { + throw std::system_error(errno, std::system_category(), "socket"); } + remote_sock_address_.sin_port = htons(port); // NOLINT(readability-isolate-declaration) - return ::bind(socket_fd_, reinterpret_cast(&sa), sizeof(sa)); + if (bind_port > 0) { + bind(bind_port); + } } -int Socket::connect() { - return ::connect(socket_fd_, reinterpret_cast(&sock_address_), sizeof(sock_address_)); +ConnectionSocket::~ConnectionSocket() { ::shutdown(socket_fd_, SHUT_RDWR); } + +int ConnectionSocket::connect() { + return ::connect(socket_fd_, reinterpret_cast(&remote_sock_address_), + sizeof(remote_sock_address_)); } CurlEasyWrapper::CurlEasyWrapper() { diff --git a/src/libaktualizr/utilities/utils.h b/src/libaktualizr/utilities/utils.h index c6bf73f930..99245e570f 100644 --- a/src/libaktualizr/utilities/utils.h +++ b/src/libaktualizr/utilities/utils.h @@ -1,7 +1,7 @@ #ifndef UTILS_H_ #define UTILS_H_ -#include +#include #include #include @@ -31,6 +31,7 @@ struct Utils { static void copyDir(const boost::filesystem::path &from, const boost::filesystem::path &to); static std::string readFileFromArchive(std::istream &as, const std::string &filename, bool trim = false); static void writeArchive(const std::map &entries, std::ostream &as); + static void removeFileFromArchive(const boost::filesystem::path &archive_path, const std::string &filename); static Json::Value getHardwareInfo(); static Json::Value getNetworkInfo(); static std::string getHostname(); @@ -40,11 +41,10 @@ struct Utils { static int ipPort(const sockaddr_storage &saddr); static int shell(const 
std::string &command, std::string *output, bool include_stderr = false); static boost::filesystem::path absolutePath(const boost::filesystem::path &root, const boost::filesystem::path &file); - static void setSocketPort(sockaddr_storage *addr, in_port_t port); static void createDirectories(const boost::filesystem::path &path, mode_t mode); static bool createSecureDirectory(const boost::filesystem::path &path); static std::string urlEncode(const std::string &input); - static CURL *curlDupHandleWrapper(CURL *curl_in, bool using_pkcs11); + static CURL *curlDupHandleWrapper(CURL *curl_in, bool using_pkcs11, CURLSH *share); static std::vector getDirEntriesByExt(const boost::filesystem::path &dir_path, const std::string &ext); static void setStorageRootPath(const std::string &storage_root_path); @@ -57,9 +57,9 @@ struct Utils { static const char *getCaPath(); private: - static std::string storage_root_path_; - static std::string user_agent_; - static boost::filesystem::path ca_path_; + static std::string storage_root_path_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + static std::string user_agent_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) + static boost::filesystem::path ca_path_; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) }; /** @@ -68,10 +68,12 @@ struct Utils { class TemporaryFile { public: explicit TemporaryFile(const std::string &hint = "file"); - TemporaryFile(const TemporaryFile &) = delete; - TemporaryFile operator=(const TemporaryFile &) = delete; ~TemporaryFile(); - void PutContents(const std::string &contents); + TemporaryFile(const TemporaryFile &guard) = delete; + TemporaryFile(TemporaryFile &&) = delete; + TemporaryFile &operator=(const TemporaryFile &guard) = delete; + TemporaryFile &operator=(TemporaryFile &&) = delete; + void PutContents(const std::string &contents) const; boost::filesystem::path Path() const; std::string PathString() const; @@ -82,9 +84,11 @@ class TemporaryFile { class 
TemporaryDirectory { public: explicit TemporaryDirectory(const std::string &hint = "dir"); - TemporaryDirectory(const TemporaryDirectory &) = delete; - TemporaryDirectory operator=(TemporaryDirectory &) = delete; ~TemporaryDirectory(); + TemporaryDirectory(const TemporaryDirectory &guard) = delete; + TemporaryDirectory(TemporaryDirectory &&) = delete; + TemporaryDirectory &operator=(const TemporaryDirectory &guard) = delete; + TemporaryDirectory &operator=(TemporaryDirectory &&) = delete; boost::filesystem::path Path() const; std::string PathString() const; boost::filesystem::path operator/(const boost::filesystem::path &subdir) const; @@ -93,58 +97,56 @@ class TemporaryDirectory { boost::filesystem::path tmp_name_; }; -// Can represent an absolute or relative path, only readable through the -// `.get()` method -// -// The intent is to avoid unintentional use of the "naked" relative path by -// mandating a base directory for each instantiation -class BasedPath { - public: - BasedPath(boost::filesystem::path p) : p_(std::move(p)) {} - - boost::filesystem::path get(const boost::filesystem::path &base) const { - // note: BasedPath(bp.get()) == bp - return Utils::absolutePath(base, p_); - } - - bool empty() const { return p_.empty(); } - bool operator==(const BasedPath &b) const { return p_ == b.p_; } - bool operator!=(const BasedPath &b) const { return !(*this == b); } - - private: - boost::filesystem::path p_; -}; - // helper template for C (mostly openssl) data structured // user should still take care about the order of destruction // by instantiating StructGuard<> in a right order. 
// BTW local variables are destructed in reverse order of instantiation template using StructGuard = std::unique_ptr; +template +using StructGuardInt = std::unique_ptr; -// helper object for RAII socket management -struct SocketCloser { - void operator()(const int *ptr) const { - close(*ptr); - delete ptr; - } -}; +class Socket { + public: + Socket(); + explicit Socket(int fd) : socket_fd_(fd) {} + virtual ~Socket(); + Socket(const Socket &guard) = delete; + Socket(Socket &&) = delete; + Socket &operator=(const Socket &guard) = delete; + Socket &operator=(Socket &&) = delete; -using SocketHandle = std::unique_ptr; -bool operator<(const sockaddr_storage &left, const sockaddr_storage &right); // required by std::map + int &operator*() { return socket_fd_; } + std::string ToString() const; -class Socket { + protected: + void bind(in_port_t port, bool reuse = true) const; + + int socket_fd_; +}; + +class ConnectionSocket : public Socket { public: - Socket(const std::string &ip, uint16_t port); - ~Socket(); + ConnectionSocket(const std::string &ip, in_port_t port, in_port_t bind_port = 0); + ~ConnectionSocket() override; + ConnectionSocket(const ConnectionSocket &guard) = delete; + ConnectionSocket(ConnectionSocket &&) = delete; + ConnectionSocket &operator=(const ConnectionSocket &guard) = delete; + ConnectionSocket &operator=(ConnectionSocket &&) = delete; - int bind(in_port_t port, bool reuse = true); int connect(); - int getFD() { return socket_fd_; } private: - struct sockaddr_in sock_address_; - int socket_fd_; + struct sockaddr_in remote_sock_address_; +}; + +class ListenSocket : public Socket { + public: + explicit ListenSocket(in_port_t port); + in_port_t port() const { return _port; } + + private: + in_port_t _port; }; // wrapper for curl handles @@ -152,6 +154,10 @@ class CurlEasyWrapper { public: CurlEasyWrapper(); ~CurlEasyWrapper(); + CurlEasyWrapper(const CurlEasyWrapper &guard) = delete; + CurlEasyWrapper(CurlEasyWrapper &&) = delete; + CurlEasyWrapper 
&operator=(const CurlEasyWrapper &guard) = delete; + CurlEasyWrapper &operator=(CurlEasyWrapper &&) = delete; CURL *get() { return handle; } private: @@ -159,9 +165,9 @@ class CurlEasyWrapper { }; template -static void curlEasySetoptWrapper(CURL *curl_handle, CURLoption option, T &&... args) { +static void curlEasySetoptWrapper(CURL *curl_handle, CURLoption option, T &&...args) { const CURLcode retval = curl_easy_setopt(curl_handle, option, std::forward(args)...); - if (retval != 0u) { + if (retval != 0U) { throw std::runtime_error(std::string("curl_easy_setopt error: ") + curl_easy_strerror(retval)); } } @@ -169,22 +175,25 @@ static void curlEasySetoptWrapper(CURL *curl_handle, CURLoption option, T &&... // this is reference implementation of make_unique which is not yet included to C++11 namespace std_ { template -struct _Unique_if { - using _Single_object = std::unique_ptr; +struct _Unique_if { // NOLINT(bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) + using _Single_object = std::unique_ptr; // NOLINT(bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) }; template +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays,bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) struct _Unique_if { + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays,bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) using _Unknown_bound = std::unique_ptr; }; template +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays,bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) struct _Unique_if { - using _Known_bound = void; + using _Known_bound = void; // NOLINT(bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp) }; template -typename _Unique_if::_Single_object make_unique(Args &&... 
args) { +typename _Unique_if::_Single_object make_unique(Args &&...args) { return std::unique_ptr(new T(std::forward(args)...)); } diff --git a/src/libaktualizr/utilities/utils_test.cc b/src/libaktualizr/utilities/utils_test.cc index be850f82f8..9cd7ac4bc4 100644 --- a/src/libaktualizr/utilities/utils_test.cc +++ b/src/libaktualizr/utilities/utils_test.cc @@ -12,7 +12,9 @@ #include #include +#include +#include "libaktualizr/types.h" #include "utilities/utils.h" bool CharOk(char c) { @@ -46,6 +48,23 @@ TEST(Utils, PrettyNameOk) { EXPECT_FALSE(PrettyNameOk("foo-bar-123&")); } +TEST(Utils, parseJSON) { + // this should not cause valgrind warnings + Utils::parseJSON(""); +} + +TEST(Utils, jsonToCanonicalStr) { + const std::string sample = " { \"b\": 0, \"a\": [1, 2, {}], \"0\": \"x\"}"; + Json::Value parsed; + + parsed = Utils::parseJSON(sample); + EXPECT_EQ(Utils::jsonToCanonicalStr(parsed), "{\"0\":\"x\",\"a\":[1,2,{}],\"b\":0}"); + + const std::string sample2 = "0"; + parsed = Utils::parseJSON(sample2); + EXPECT_EQ(Utils::jsonToCanonicalStr(parsed), "0"); +} + /* Read hardware info from the system. */ TEST(Utils, getHardwareInfo) { Json::Value hwinfo = Utils::getHardwareInfo(); @@ -137,9 +156,7 @@ TEST(Utils, Base64RoundTrip) { } } -/* - * Extract credentials from a provided archive. - */ +/* Extract credentials from a provided archive. */ TEST(Utils, ArchiveRead) { const std::string archive_path = "tests/test_data/credentials.zip"; @@ -152,7 +169,6 @@ TEST(Utils, ArchiveRead) { { std::ifstream as(archive_path, std::ios::binary | std::ios::in); EXPECT_FALSE(as.fail()); - std::string url = Utils::readFileFromArchive(as, "autoprov.url"); EXPECT_EQ(url.rfind("https://", 0), 0); } @@ -173,6 +189,33 @@ TEST(Utils, ArchiveWrite) { } } +/* Remove credentials from a provided archive. 
*/ +TEST(Utils, ArchiveRemoveFile) { + const boost::filesystem::path old_path = "tests/test_data/credentials.zip"; + TemporaryDirectory temp_dir; + const boost::filesystem::path new_path = temp_dir.Path() / "credentials.zip"; + boost::filesystem::copy_file(old_path, new_path); + + Utils::removeFileFromArchive(new_path, "autoprov_credentials.p12"); + EXPECT_THROW(Utils::removeFileFromArchive(new_path, "bogus_filename"), std::runtime_error); + + { + std::ifstream as(new_path.c_str(), std::ios::binary | std::ios::in); + EXPECT_FALSE(as.fail()); + EXPECT_THROW(Utils::readFileFromArchive(as, "autoprov_credentials.p12"), std::runtime_error); + } + + // Make sure the URL is still there. That's the only file that should be left + // under normal circumstances. + { + std::ifstream as_old(old_path.c_str(), std::ios::binary | std::ios::in); + EXPECT_FALSE(as_old.fail()); + std::ifstream as_new(new_path.c_str(), std::ios::binary | std::ios::in); + EXPECT_FALSE(as_new.fail()); + EXPECT_EQ(Utils::readFileFromArchive(as_old, "autoprov.url"), Utils::readFileFromArchive(as_new, "autoprov.url")); + } +} + /* Create a temporary directory. 
*/ TEST(Utils, TemporaryDirectory) { boost::filesystem::path p; @@ -292,33 +335,6 @@ TEST(Utils, writeFileJson) { EXPECT_EQ(result_json["key"].asString(), val["key"].asString()); } -TEST(Utils, ipUtils) { - int fd = socket(AF_INET6, SOCK_STREAM, 0); - - EXPECT_NE(fd, -1); - SocketHandle hdl(new int(fd)); - - sockaddr_in6 sa{}; - - memset(&sa, 0, sizeof(sa)); - sa.sin6_family = AF_INET6; - sa.sin6_port = htons(0); - sa.sin6_addr = IN6ADDR_ANY_INIT; - - int reuseaddr = 1; - if (setsockopt(*hdl, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr)) < 0) { - throw std::runtime_error("setsockopt(SO_REUSEADDR) failed"); - } - - EXPECT_NE(bind(*hdl, reinterpret_cast(&sa), sizeof(sa)), -1); - - sockaddr_storage ss{}; - EXPECT_NO_THROW(ss = Utils::ipGetSockaddr(*hdl)); - - EXPECT_NE(Utils::ipDisplayName(ss), "unknown"); - EXPECT_NE(Utils::ipPort(ss), -1); -} - TEST(Utils, shell) { std::string out; int statuscode = Utils::shell("ls /", &out); @@ -358,13 +374,13 @@ TEST(Utils, getDirEntriesByExt) { } TEST(Utils, BasedPath) { - BasedPath bp("a/test.xml"); + utils::BasedPath bp("a/test.xml"); - EXPECT_EQ(BasedPath(bp.get("")), bp); + EXPECT_EQ(utils::BasedPath(bp.get("")), bp); EXPECT_EQ(bp.get("/"), "/a/test.xml"); EXPECT_EQ(bp.get("/x"), "/x/a/test.xml"); - BasedPath abp("/a/test.xml"); + utils::BasedPath abp("/a/test.xml"); EXPECT_EQ(abp.get(""), "/a/test.xml"); EXPECT_EQ(abp.get("/root/var"), "/a/test.xml"); diff --git a/src/libaktualizr/utilities/xml2json.h b/src/libaktualizr/utilities/xml2json.h new file mode 100644 index 0000000000..8a53cdb213 --- /dev/null +++ b/src/libaktualizr/utilities/xml2json.h @@ -0,0 +1,101 @@ +#include +#include +#include +#include + +#include "json/json.h" + +namespace xml2json { + +static inline void addSubArray(Json::Value &d, const std::string &key, const Json::Value &arr) { + if (arr.empty()) { + return; + } else if (arr.size() == 1) { + d[key] = arr[0]; + } else { + d[key] = arr; + } +} + +static const int MAX_DEPTH = 10; + +// 
NOLINTNEXTLINE(misc-no-recursion) +static inline Json::Value treeJson(const boost::property_tree::ptree &tree, int depth = 0) { + namespace bpt = boost::property_tree; + + if (depth > MAX_DEPTH) { + throw std::runtime_error("parse error"); + } + + bool leaf = true; + Json::Value output; + + struct { + // used to collasce same-key children into lists + std::string key; + Json::Value list = Json::Value(Json::arrayValue); + } cur; + + for (auto it = tree.ordered_begin(); it != tree.not_found(); it++) { + const std::string &val = it->first; + const bpt::ptree &subtree = it->second; + leaf = false; + + // xml attributes + if (val == "") { + for (const bpt::ptree::value_type &attr : subtree) { + output[std::string("@") + attr.first] = attr.second.data(); + } + continue; + } + + if (cur.key.empty()) { + cur.key = val; + } else if (cur.key != val) { + addSubArray(output, cur.key, cur.list); + + cur.key = val; + cur.list = Json::Value(Json::arrayValue); + } + cur.list.append(treeJson(subtree, depth + 1)); + } + + if (!cur.key.empty()) { + addSubArray(output, cur.key, cur.list); + } + + { + auto val = tree.get_value_optional(); + if (!!val && !val.get().empty()) { + if (leaf) { + // c -> { "e": "c" } + return val.get(); + } else { + // c -> { "e": { "@a": "b", "#text": "c" } } + output["#text"] = val.get(); + } + } + } + + return output; +} + +// NOLINTNEXTLINE(clang-diagnostic-unused-function) +static inline Json::Value xml2json(std::istream &is) { + namespace bpt = boost::property_tree; + + try { + bpt::ptree pt; + bpt::read_xml(is, pt, bpt::xml_parser::trim_whitespace); + + if (pt.size() != 1) { + throw std::runtime_error("parse error"); + } + + return treeJson(pt); + } catch (std::exception &e) { + throw std::runtime_error("parse error"); + } +} + +} // namespace xml2json diff --git a/src/libaktualizr/utilities/xml2json_test.cc b/src/libaktualizr/utilities/xml2json_test.cc new file mode 100644 index 0000000000..93b857f567 --- /dev/null +++ 
b/src/libaktualizr/utilities/xml2json_test.cc @@ -0,0 +1,99 @@ +#include +#include + +#include "utilities/utils.h" +#include "utilities/xml2json.h" + +TEST(xml2json, simple) { + { + std::stringstream inxml(""); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":null})"); + } + { + std::stringstream inxml(R"()"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"@b":"xxx"}})"); + } + { + std::stringstream inxml(R"()"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"@b":"xxx","@c":"rrr"}})"); + } + { + std::stringstream inxml("xxx"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":"xxx"})"); + } + { + std::stringstream inxml("xxx"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"b":"xxx"}})"); + } + { + std::stringstream inxml("xxxyyy"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"b":"xxx","c":"yyy"}})"); + } + { + std::stringstream inxml(R"(yy)"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"#text":"yy","@xxx":"1"}})"); + } + { + std::stringstream inxml(R"(1xx2)"); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), R"({"a":{"b":["1","2"],"c":"xx"}})"); + } +} + +static const std::string example_manifest = R"( + + + + + + + + + + + + + + + + + + + +)"; + +static const std::string example_json = + 
R"({"manifest":{"default":{"@remote":"github","@revision":"master"},"project":[{"@name":"advancedtelematic/meta-updater","@path":"meta-updater","@revision":"6e1c9cf5cc59437ce07f5aec2dc62d665d218bdb","@upstream":"master"},{"@name":"advancedtelematic/meta-updater-minnowboard","@path":"meta-updater-minnowboard","@revision":"c822d05f860c3a2437236696b22ef7536c0a1311","@upstream":"master"},{"@name":"advancedtelematic/meta-updater-qemux86-64","@path":"meta-updater-qemux86-64","@revision":"162d1378659343a3ad34569c1315babe7246ec86","@upstream":"master"},{"@name":"advancedtelematic/meta-updater-raspberrypi","@path":"meta-updater-raspberrypi","@revision":"501156e6d12e3207a5acb611984dce1856a7729c","@upstream":"master"},{"@name":"meta-intel","@remote":"yocto","@revision":"eacd8eb9f762c90cec2825736e8c4d483966c4d4","@upstream":"master"},{"@name":"meta-openembedded","@remote":"openembedded","@revision":"18506b797bcfe162999223b79919e7c730875bb4","@upstream":"master"},{"@name":"meta-raspberrypi","@remote":"yocto","@revision":"254c9366b9c3309db6dc07beb80aba55e0c87f94","@upstream":"master"},{"@name":"poky","@remote":"yocto","@revision":"3a751d5564fc6ee9aef225653cc7b8630fd25a35","@upstream":"master"},{"@name":"ricardosalveti/meta-updater-riscv","@path":"meta-updater-riscv","@revision":"8164a21c04a7de91f90ada763104063540a84961","@upstream":"master"},{"@name":"riscv/meta-riscv","@path":"meta-riscv","@revision":"0ba537b9270046b1c08d2b2f1cc9a9ca96ea0328","@upstream":"master"}],"remote":[{"@fetch":"https://github.com","@name":"github","@pushurl":"ssh://git@github.com"},{"@fetch":"git://git.openembedded.org/","@name":"openembedded"},{"@fetch":"https://git.yoctoproject.org/git/","@name":"yocto"}]}})"; + +TEST(xml2json, manifest) { + std::stringstream inxml(example_manifest); + auto j = xml2json::xml2json(inxml); + EXPECT_EQ(Utils::jsonToCanonicalStr(j), example_json); +} + +TEST(xml2json, bad_input) { + { + // wrong xml + std::stringstream inxml("xxx"); + 
EXPECT_THROW(xml2json::xml2json(inxml), std::runtime_error); + } +} + +#ifndef __NO_MAIN__ +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +#endif diff --git a/src/load_tests/CMakeLists.txt b/src/load_tests/CMakeLists.txt deleted file mode 100644 index 5c6b54b2ef..0000000000 --- a/src/load_tests/CMakeLists.txt +++ /dev/null @@ -1,44 +0,0 @@ -set(LOAD_TESTS_SRC main.cc - check.h check.cc - provision.h provision.cc - executor.h - context.h context.cc - stats.cc stats.h - sslinit.h sslinit.cc executor.cc) - -set(LT_TREEHUB_SRC treehub.cc treehub.h) - -if (BUILD_LOAD_TESTS) - - add_executable(ota-load-tests ${LOAD_TESTS_SRC}) - - if (BUILD_OSTREE) - target_sources(ota-load-tests PUBLIC ${LT_TREEHUB_SRC}) - endif (BUILD_OSTREE) - - add_dependencies(ota-load-tests aktualizr hdr_histogram) - - target_include_directories(ota-load-tests PUBLIC - ${PROJECT_SOURCE_DIR}/third_party/HdrHistogram_c/src) - - target_link_libraries(ota-load-tests - aktualizr_static_lib - hdr_histogram_static - ${Boost_LIBRARIES} - ${OPENSSL_LIBRARIES} - ${sodium_LIBRARY_RELEASE} - ${CMAKE_THREAD_LIBS_INIT} - ${CURL_LIBRARIES} - ${GLIB2_LIBRARIES} - ${LibArchive_LIBRARIES} - ${LIBOSTREE_LIBRARIES} - ${LIBP11_LIBRARIES} - ${SQLITE3_LIBRARIES} - ${SYSTEMD_LIBRARY}) - - install(TARGETS ota-load-tests - COMPONENT aktualizr - RUNTIME DESTINATION bin) -endif (BUILD_LOAD_TESTS) - -aktualizr_source_file_checks(${LOAD_TESTS_SRC} ${LT_TREEHUB_SRC}) diff --git a/src/load_tests/check.cc b/src/load_tests/check.cc deleted file mode 100644 index a3cbe79e70..0000000000 --- a/src/load_tests/check.cc +++ /dev/null @@ -1,105 +0,0 @@ -#include "check.h" - -#include -#include -#include -#include "utilities/utils.h" - -#include "context.h" -#include "executor.h" -#include "primary/events.h" -#include "primary/reportqueue.h" -#include "primary/sotauptaneclient.h" -#include "storage/invstorage.h" -#include "storage/sqlstorage.h" -#include "uptane/uptanerepository.h" 
- -namespace fs = boost::filesystem; - -class EphemeralStorage : public SQLStorage { - public: - EphemeralStorage(const StorageConfig &config, bool readonly) : SQLStorage(config, readonly) {} - void storeRoot(const std::string &data, Uptane::RepositoryType repo, Uptane::Version version) override { - (void)data; - (void)repo; - (void)version; - }; - void storeNonRoot(const std::string &data, Uptane::RepositoryType repo, Uptane::Role role) override { - (void)data; - (void)repo; - (void)role; - }; - - static std::shared_ptr newStorage(const StorageConfig &config) { - return std::make_shared(config, false); - } -}; - -class CheckForUpdate { - Config config; - - std::unique_ptr aktualizr_ptr; - - public: - CheckForUpdate(Config config_) : config{config_}, aktualizr_ptr{std_::make_unique(config)} { - try { - aktualizr_ptr->Initialize(); - } catch (...) { - LOG_ERROR << "Unable to initialize a device: " << config.storage.path; - } - } - - void operator()() { - LOG_DEBUG << "Updating a device in " << config.storage.path.native(); - try { - aktualizr_ptr->CheckUpdates().get(); - } catch (const Uptane::MissingRepo &e) { - LOG_DEBUG << e.what(); - } catch (const std::exception &e) { - LOG_ERROR << "Unable to get new targets: " << e.what(); - } catch (...) 
{ - LOG_ERROR << "Unknown error occured while checking for updates"; - } - } -}; - -class CheckForUpdateTasks { - std::vector configs; - - std::mt19937 rng; - - std::uniform_int_distribution gen; - - public: - CheckForUpdateTasks(const boost::filesystem::path baseDir) - : configs{loadDeviceConfigurations(baseDir)}, gen(0UL, configs.size() - 1) { - std::random_device seedGen; - rng.seed(seedGen()); - } - - CheckForUpdate nextTask() { - auto srcConfig = configs[gen(rng)]; - auto config{srcConfig}; - config.storage.path = fs::temp_directory_path() / fs::unique_path(); - LOG_DEBUG << "Copy device " << srcConfig.storage.path << " into " << config.storage.path; - fs::create_directory(config.storage.path); - fs::permissions(config.storage.path, fs::remove_perms | fs::group_write | fs::others_write); - fs::copy_file(srcConfig.storage.sqldb_path.get(srcConfig.storage.path), - config.storage.sqldb_path.get(config.storage.path)); - return CheckForUpdate{config}; - } -}; - -void checkForUpdates(const boost::filesystem::path &baseDir, const unsigned int rate, const unsigned int nr, - const unsigned int parallelism) { - LOG_INFO << "Target rate: " << rate << "op/s, operations: " << nr << ", workers: " << parallelism; - std::vector feeds(parallelism, CheckForUpdateTasks{baseDir}); - std::unique_ptr execController; - if (nr == 0) { - execController = std_::make_unique(); - } else { - execController = std_::make_unique(nr); - } - Executor exec{feeds, rate, std::move(execController), "Check for updates"}; - exec.run(); -} diff --git a/src/load_tests/check.h b/src/load_tests/check.h deleted file mode 100644 index 903e587523..0000000000 --- a/src/load_tests/check.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef LT_CHECK_H_ -#define LT_CHECK_H_ - -#include -#include - -void checkForUpdates(const boost::filesystem::path &baseDir, const unsigned int rate, const unsigned int nr, - const unsigned int parallelism); - -#endif diff --git a/src/load_tests/context.cc b/src/load_tests/context.cc 
deleted file mode 100644 index 034b8fa736..0000000000 --- a/src/load_tests/context.cc +++ /dev/null @@ -1,25 +0,0 @@ -#include "context.h" -#include -#include "logging/logging.h" - -using namespace boost::filesystem; -namespace po = boost::program_options; - -Config configure(const path& cfgFile, const int logLevel) { - po::variables_map vm; - vm.insert(std::make_pair("loglevel", po::variable_value(logLevel, false))); - const std::vector configDirs{cfgFile}; - vm.insert(std::make_pair("config", po::variable_value(configDirs, false))); - po::notify(vm); - return Config{vm}; -} - -std::vector loadDeviceConfigurations(const path& baseDir) { - const int severity = loggerGetSeverity(); - std::vector configs; - for (directory_entry& x : directory_iterator(baseDir)) { - const path sotaToml = x / "sota.toml"; - configs.push_back(configure(sotaToml, severity)); - } - return configs; -} \ No newline at end of file diff --git a/src/load_tests/context.h b/src/load_tests/context.h deleted file mode 100644 index 937d3fb5b3..0000000000 --- a/src/load_tests/context.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef LT_CONTEXT_H_ -#define LT_CONTEXT_H_ - -#include -#include - -Config configure(const boost::filesystem::path& cfgFile, const int logLevel); - -std::vector loadDeviceConfigurations(const boost::filesystem::path& baseDir); - -#endif \ No newline at end of file diff --git a/src/load_tests/executor.cc b/src/load_tests/executor.cc deleted file mode 100644 index 1e5a5423aa..0000000000 --- a/src/load_tests/executor.cc +++ /dev/null @@ -1,3 +0,0 @@ -#include "executor.h" - -std::atomic_bool InterruptableExecutionController::interrupted{false}; diff --git a/src/load_tests/executor.h b/src/load_tests/executor.h deleted file mode 100644 index 5c542f075b..0000000000 --- a/src/load_tests/executor.h +++ /dev/null @@ -1,183 +0,0 @@ -#ifndef LT_EXECUTOR_H -#define LT_EXECUTOR_H - -#ifdef BUILD_OSTREE -#include -#endif -#include -#include -#include -#include -#include -#include -#include 
-#include -#include "logging/logging.h" -#include "stats.h" - -namespace timer = std::chrono; - -class ExecutionController { - public: - virtual ~ExecutionController() = default; - virtual void stop() = 0; - - virtual bool claim() = 0; -}; - -class UnboundedExecutionController : ExecutionController { - private: - std::atomic_bool stopped; - - public: - UnboundedExecutionController() : stopped{false} {} - - void stop() override { stopped = true; } - - bool claim() override { return !stopped; } -}; - -class FixedExecutionController : public ExecutionController { - private: - std::atomic_uint iterations; - - public: - FixedExecutionController(const unsigned int i) : iterations{i} {} - - void stop() override { iterations.store(0); } - - bool claim() override { - while (true) { - auto i = iterations.load(); - if (i == 0) { - return false; - } else if (iterations.compare_exchange_strong(i, i - 1)) { - return true; - } - } - } -}; - -class InterruptableExecutionController : public ExecutionController { - private: - std::atomic_bool stopped; - static std::atomic_bool interrupted; - static void handleSignal(int) { - LOG_INFO << "SIGINT received"; - interrupted = true; - } - - public: - InterruptableExecutionController() : stopped{false} { - std::signal(SIGINT, InterruptableExecutionController::handleSignal); - }; - - bool claim() override { return !(interrupted || stopped); } - - void stop() override { stopped = true; } -}; - -typedef timer::steady_clock::time_point TimePoint; -class TaskStartTimeCalculator { - TimePoint startTime; - const timer::duration taskInterval; - std::atomic_ulong taskIndex; - - public: - TaskStartTimeCalculator(const unsigned rate) : startTime{}, taskInterval{std::milli::den / rate}, taskIndex{0} {} - - void start() { startTime = timer::steady_clock::now(); } - - TimePoint operator()() { - auto i = ++taskIndex; - return startTime + taskInterval * i; - } -}; - -template -class Executor { - std::unique_ptr controller; - std::vector workers; - 
std::vector statistics; - TaskStartTimeCalculator calculateTaskStartTime; - boost::latch threadCountDown; - boost::latch starter; - const std::string label; - - void runWorker(TaskStream &tasks, Statistics &stats) { -#ifdef BUILD_OSTREE - GMainContext *thread_context = g_main_context_new(); - g_main_context_push_thread_default(thread_context); -#endif - using clock = std::chrono::steady_clock; - LOG_DEBUG << label << ": Worker created: " << std::this_thread::get_id(); - threadCountDown.count_down(); - starter.wait(); - while (controller->claim()) { - auto task = tasks.nextTask(); - const auto intendedStartTime = calculateTaskStartTime(); - if (timer::steady_clock::now() < intendedStartTime) { - std::this_thread::sleep_until(intendedStartTime); - } - const clock::time_point start = clock::now(); - task(); - const clock::time_point end = clock::now(); - std::chrono::milliseconds executionTime = std::chrono::duration_cast(end - start); - stats.recordSuccess(executionTime); - } - LOG_DEBUG << label << ": Worker finished execution: " << std::this_thread::get_id(); -#ifdef BUILD_OSTREE - g_main_context_pop_thread_default(thread_context); - g_main_context_unref(thread_context); -#endif - } - - public: - Executor(std::vector &feeds, const unsigned rate, std::unique_ptr ctrl, - const std::string lbl) - : controller{std::move(ctrl)}, - workers{}, - statistics(feeds.size()), - calculateTaskStartTime{rate}, - threadCountDown{feeds.size()}, - starter{1}, - label{lbl} { - workers.reserve(feeds.size()); - try { - for (size_t i = 0; i < feeds.size(); i++) { - workers.push_back(std::thread(&Executor::runWorker, this, std::ref(feeds[i]), std::ref(statistics[i]))); - } - } catch (...) 
{ - controller->stop(); - throw; - } - }; - - Statistics run() { - Statistics summary{}; - // wait till all threads are crerated and ready to go - LOG_INFO << label << ": Waiting for threads to start"; - threadCountDown.wait(); - calculateTaskStartTime.start(); - summary.start(); - LOG_INFO << label << ": Starting tests"; - // start execution - starter.count_down(); - // wait till all threads finished execution - for (size_t i = 0; i < workers.size(); i++) { - if (workers[i].joinable()) { - workers[i].join(); - } - } - - summary.stop(); - for (size_t i = 0; i < statistics.size(); i++) { - summary += statistics[i]; - } - std::cout << "Results for: " << label << std::endl; - summary.print(); - return summary; - }; -}; - -#endif diff --git a/src/load_tests/main.cc b/src/load_tests/main.cc deleted file mode 100644 index 9364e12888..0000000000 --- a/src/load_tests/main.cc +++ /dev/null @@ -1,213 +0,0 @@ -#include -#include -#include -#include -#include - -#include "check.h" -#include "provision.h" -#include "sslinit.h" -#ifdef BUILD_OSTREE -#include "treehub.h" -#endif - -namespace bpo = boost::program_options; - -void checkForUpdatesCmd(const std::vector &opts) { - namespace fs = boost::filesystem; - unsigned int devicesPerSec; - unsigned int opsNr; - unsigned int parallelism; - std::string inputDir; - bpo::options_description description("Check for update options"); - // clang-format off - description.add_options() - ("inputdir,i", bpo::value(&inputDir)->required(), "path to the input data") - ("rate,r", bpo::value(&devicesPerSec)->default_value(5), "devices/sec") - ("number,n", bpo::value(&opsNr)->default_value(100), "number of operation to execute") - ("threads,t", bpo::value(¶llelism)->default_value(std::thread::hardware_concurrency()), "number of worker threads"); - // clang-format on - - bpo::variables_map vm; - bpo::store(bpo::command_line_parser(opts).options(description).run(), vm); - bpo::notify(vm); - - const fs::path inputPath(inputDir); - LOG_INFO << 
"Checking for updates..."; - checkForUpdates(inputPath, devicesPerSec, opsNr, parallelism); -} - -#ifdef BUILD_OSTREE -void fetchRemoteCmd(const std::vector &opts) { - std::string inputDir; - std::string outDir; - std::string branchName; - std::string remoteUrl; - unsigned int opsPerSec; - unsigned int opsNr; - unsigned int parallelism; - bpo::options_description description("Fetch from ostree"); - // clang-format off - description.add_options() - ("inputdir,i", bpo::value(&inputDir)->required(), "Directory containig provisioned devices.") - ("outputdir,o", bpo::value(&outDir)->required(), "Directory where repos will be created") - ("branch,b", bpo::value(&branchName)->required(), "Name of a branch to pull") - ("url,u", bpo::value(&remoteUrl)->required(), "Url of the repository") - ("number,n", bpo::value(&opsNr)->default_value(100), "number of operation to execute") - ("threads,t", bpo::value(¶llelism)->default_value(std::thread::hardware_concurrency()), "number of worker threads") - ("rate,r", bpo::value(&opsPerSec)->default_value(50), "repo pulls per second"); - // clang-format on - - bpo::variables_map vm; - bpo::store(bpo::command_line_parser(opts).options(description).run(), vm); - bpo::notify(vm); - - const boost::filesystem::path outputPath(outDir); - fetchFromOstree(inputDir, outputPath, branchName, remoteUrl, opsPerSec, opsNr, parallelism); -} - -void checkAndFetchCmd(const std::vector &opts) { - namespace fs = boost::filesystem; - unsigned int checkPerSec; - unsigned int checkNr; - unsigned int checkParallelism; - std::string branchName; - std::string remoteUrl; - unsigned int fetchesPerSec; - unsigned int fetchesNr; - unsigned int fetchesParallelism; - std::string inputDir; - std::string outDir; - bpo::options_description description("Check for update options"); - // clang-format off - description.add_options() - ("inputdir,i", bpo::value(&inputDir)->required(), "path to the input data") - ("outputdir,o", bpo::value(&outDir)->required(), "Directory 
where repos will be created") - ("branch,b", bpo::value(&branchName)->required(), "Name of a branch to pull") - ("url,u", bpo::value(&remoteUrl)->required(), "Url of the repository") - ("fn", bpo::value(&fetchesNr)->default_value(100), "number of fetches from treehub") - ("ft", bpo::value(&fetchesParallelism)->default_value(std::thread::hardware_concurrency()), "number of fetch worker threads") - ("fr", bpo::value(&fetchesPerSec)->default_value(50), "fetches per second") - ("cr", bpo::value(&checkPerSec)->default_value(5), "check for update/sec") - ("cn", bpo::value(&checkNr)->default_value(100), "number of checks to execute") - ("ct", bpo::value(&checkParallelism)->default_value(std::thread::hardware_concurrency()), "number of check worker threads"); - // clang-format on - - bpo::variables_map vm; - bpo::store(bpo::command_line_parser(opts).options(description).run(), vm); - bpo::notify(vm); - - const fs::path inputPath(inputDir); - const fs::path outputPath(outDir); - std::thread checkThread{checkForUpdates, std::ref(inputPath), checkPerSec, checkNr, checkParallelism}; - std::thread fetchThread{ - fetchFromOstree, std::ref(inputPath), std::ref(outputPath), std::ref(branchName), std::ref(remoteUrl), - fetchesPerSec, fetchesNr, fetchesParallelism}; - checkThread.join(); - fetchThread.join(); -} -#endif - -void provisionDevicesCmd(const std::vector &opts) { - using namespace boost::filesystem; - unsigned int devicesNr; - size_t parallelism; - uint devicesPerSec; - std::string outDir; - std::string pathToCredentials; - std::string gwUrl; - bpo::options_description description("Register devices"); - - // clang-format off - description.add_options() - ("outputdir,o", bpo::value(&outDir)->required(), "output directory") - ("gateway,g", bpo::value(&gwUrl)->required(), "url of the device gateway") - ("credentials,c", bpo::value(&pathToCredentials), "path to a provisioning credentials") - ("dev-number,n", bpo::value(&devicesNr)->default_value(100), "number of devices") - 
("rate,r", bpo::value(&devicesPerSec)->default_value(2), "devices/sec") - ("threads,t", bpo::value(¶llelism)->default_value(std::thread::hardware_concurrency()), "number of worker threads"); - // clang-format on - - bpo::variables_map vm; - bpo::store(bpo::command_line_parser(opts).options(description).run(), vm); - bpo::notify(vm); - - const path devicesDir(outDir); - - const path credentialsFile(pathToCredentials); - mkDevices(devicesDir, credentialsFile, gwUrl, parallelism, devicesNr, devicesPerSec); -} - -void setLogLevel(const bpo::variables_map &vm) { - // set the log level from command line option - boost::log::trivial::severity_level severity = - static_cast(vm["loglevel"].as()); - if (severity < boost::log::trivial::trace) { - LOG_DEBUG << "Invalid log level"; - severity = boost::log::trivial::trace; - } - if (boost::log::trivial::fatal < severity) { - LOG_WARNING << "Invalid log level"; - severity = boost::log::trivial::fatal; - } - LoggerConfig loggerConfig{}; - loggerConfig.loglevel = severity; - logger_set_threshold(loggerConfig); -} - -int main(int argc, char *argv[]) { - std::srand(static_cast(std::time(0))); - - std::map)>> commands{{"provision", provisionDevicesCmd}, - {"check", checkForUpdatesCmd} -#ifdef BUILD_OSTREE - , - {"checkfetch", checkAndFetchCmd}, - {"fetch", fetchRemoteCmd} -#endif - }; - - std::stringstream acc; - bool first = true; - acc << "Supported tests: "; - for (auto const &elem : commands) { - if (!first) { - acc << ", "; - } else { - first = false; - }; - acc << elem.first; - }; - std::string supportedTests = acc.str(); - std::string cmd; - bpo::options_description description("OTA load tests"); - description.add_options()("help,h", "Show help message")("loglevel", bpo::value()->default_value(3), - "set log level 0-4 (trace, debug, warning, info, error)")( - "test", bpo::value(&cmd), supportedTests.c_str()); - - bpo::variables_map vm; - bpo::parsed_options parsed = bpo::command_line_parser(argc, 
argv).options(description).allow_unregistered().run(); - bpo::store(parsed, vm); - bpo::notify(vm); - - if (vm.count("help")) { - std::cout << description << std::endl; - return 0; - } - - logger_init(); - setLogLevel(&vm); - - if (vm.count("test")) { - auto fn = commands.find(cmd); - if (fn != commands.end()) { - openssl_callbacks_setup(); - std::vector unprocessedOptions = bpo::collect_unrecognized(parsed.options, bpo::include_positional); - fn->second(unprocessedOptions); - openssl_callbacks_cleanup(); - } else { - LOG_ERROR << supportedTests; - } - } - return 0; -} diff --git a/src/load_tests/provision.cc b/src/load_tests/provision.cc deleted file mode 100644 index 6476e6f7f0..0000000000 --- a/src/load_tests/provision.cc +++ /dev/null @@ -1,100 +0,0 @@ -#include "provision.h" - -#include -#include -#include -#include - -#include "config/config.h" -#include "context.h" -#include "executor.h" -#include "http/httpclient.h" -#include "logging/logging.h" -#include "primary/events.h" -#include "primary/reportqueue.h" -#include "primary/sotauptaneclient.h" -#include "uptane/uptanerepository.h" -#include "utilities/utils.h" - -using namespace boost::filesystem; -using ptree = boost::property_tree::ptree; - -path mkDeviceBaseDir(const boost::uuids::uuid &deviceId, const path &dstDir) { - path deviceBaseDir{dstDir}; - deviceBaseDir /= to_string(deviceId); - create_directory(deviceBaseDir); - permissions(deviceBaseDir, remove_perms | group_write | others_write); - return deviceBaseDir; -} - -path writeDeviceConfig(const ptree &cfgTemplate, const path &deviceBaseDir, const boost::uuids::uuid &deviceId) { - ptree deviceCfg{cfgTemplate}; - path cfgFilePath{deviceBaseDir}; - cfgFilePath /= "sota.toml"; - deviceCfg.put_child("storage.path", ptree("\"" + deviceBaseDir.native() + "\"")); - deviceCfg.put_child("storage.type", ptree("\"sqlite\"")); - deviceCfg.put_child("provision.primary_ecu_serial", ptree("\"" + to_string(deviceId) + "\"")); - 
boost::property_tree::ini_parser::write_ini(cfgFilePath.native(), deviceCfg); - return cfgFilePath; -} - -class ProvisionDeviceTask { - Config config; - std::unique_ptr aktualizr_ptr; - - public: - explicit ProvisionDeviceTask(const Config cfg) : config{cfg}, aktualizr_ptr{std_::make_unique(config)} { - logger_set_threshold(boost::log::trivial::severity_level::trace); - } - - ProvisionDeviceTask(const ProvisionDeviceTask &) = delete; - ProvisionDeviceTask(ProvisionDeviceTask &&) = default; - - void operator()() { - try { - aktualizr_ptr->Initialize(); - } catch (std::exception &err) { - LOG_ERROR << "Failed to register device " << config.storage.path << ": " << err.what(); - } catch (...) { - LOG_ERROR << "Failed to register device " << config.storage.path; - } - } -}; - -class ProvisionDeviceTaskStream { - boost::uuids::basic_random_generator gen; - const path &dstDir; - const ptree &cfgTemplate; - const int logLevel; - - public: - ProvisionDeviceTaskStream(const path &dstDir_, const ptree &ct, const int ll) - : gen{}, dstDir{dstDir_}, cfgTemplate{ct}, logLevel{ll} {} - - ProvisionDeviceTask nextTask() { - LOG_INFO << "Creating provision device task"; - const boost::uuids::uuid deviceId = gen(); - const path deviceBaseDir = mkDeviceBaseDir(deviceId, dstDir); - const path deviceCfgPath = writeDeviceConfig(cfgTemplate, deviceBaseDir, deviceId); - Config config = configure(deviceCfgPath, logLevel); - return ProvisionDeviceTask{config}; - } -}; - -void mkDevices(const path &dstDir, const path &bootstrapCredentials, const std::string &gw_uri, - const size_t parallelism, const unsigned int nr, const unsigned int rate) { - const int severity = loggerGetSeverity(); - ptree cfgTemplate{}; - cfgTemplate.put_child("tls.server", ptree("\"https://" + gw_uri + "\"")); - cfgTemplate.put_child("provision.server", ptree("\"https://" + gw_uri + "\"")); - cfgTemplate.put_child("provision.provision_path", ptree("\"" + bootstrapCredentials.native() + "\"")); - // 
cfgTemplate.put_child("pacman.sysroot", ptree("\"/sysroot\"")); - cfgTemplate.put_child("pacman.type", ptree("\"none\"")); - std::vector feeds; - for (size_t i = 0; i < parallelism; i++) { - feeds.emplace_back(dstDir, cfgTemplate, severity); - } - std::unique_ptr execController = std_::make_unique(nr); - Executor exec{feeds, rate, std::move(execController), "Provision"}; - exec.run(); -} diff --git a/src/load_tests/provision.h b/src/load_tests/provision.h deleted file mode 100644 index 7b48c21e83..0000000000 --- a/src/load_tests/provision.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef LT_PROVISION_H -#define LT_PROVISION_H - -#include - -void mkDevices(const boost::filesystem::path& dst_dir, const boost::filesystem::path& bootstrapCredentials, - const std::string& gw_uri, const size_t parallelism, const unsigned int nr, const unsigned int rate); - -#endif diff --git a/src/load_tests/sslinit.cc b/src/load_tests/sslinit.cc deleted file mode 100644 index ca7d39abeb..0000000000 --- a/src/load_tests/sslinit.cc +++ /dev/null @@ -1,74 +0,0 @@ -#include "sslinit.h" -#include -#include -#include - -#if AKTUALIZR_OPENSSL_PRE_11 -static pthread_mutex_t *lock_cs; -static long *lock_count; - -// static void pthreads_locking_callback(int mode, int type, const char *, int); -// static unsigned long pthreads_thread_id(void); - -void pthreads_locking_callback(int mode, int type, const char *, int) { -#if 0 - fprintf(stderr, "thread=%4d mode=%s lock=%s %s:%d\n", - CRYPTO_thread_id(), - (mode & CRYPTO_LOCK) ? "l" : "u", - (type & CRYPTO_READ) ? 
"r" : "w", file, line); -#endif -#if 0 - if (CRYPTO_LOCK_SSL_CERT == type) - fprintf(stderr, "(t,m,f,l) %ld %d %s %d\n", - CRYPTO_thread_id(), mode, file, line); -#endif - if (mode & CRYPTO_LOCK) { - pthread_mutex_lock(&(lock_cs[type])); - lock_count[type]++; - } else { - pthread_mutex_unlock(&(lock_cs[type])); - } -} - -unsigned long pthreads_thread_id(void) { - unsigned long ret; - - ret = (unsigned long)pthread_self(); - return (ret); -} - -void openssl_callbacks_setup(void) { - int i; - - lock_cs = - (pthread_mutex_t *)OPENSSL_malloc(static_cast((unsigned long)CRYPTO_num_locks() * sizeof(pthread_mutex_t))); - lock_count = (long *)OPENSSL_malloc(static_cast((unsigned long)CRYPTO_num_locks() * sizeof(long))); - if (!lock_cs || !lock_count) { - /* Nothing we can do about this...void function! */ - if (lock_cs) OPENSSL_free(lock_cs); - if (lock_count) OPENSSL_free(lock_count); - return; - } - for (i = 0; i < CRYPTO_num_locks(); i++) { - lock_count[i] = 0; - pthread_mutex_init(&(lock_cs[i]), NULL); - } - - CRYPTO_set_id_callback((unsigned long (*)())pthreads_thread_id); - CRYPTO_set_locking_callback(pthreads_locking_callback); -} - -void openssl_callbacks_cleanup(void) { - int i; - - CRYPTO_set_locking_callback(NULL); - for (i = 0; i < CRYPTO_num_locks(); i++) { - pthread_mutex_destroy(&(lock_cs[i])); - } - OPENSSL_free(lock_cs); - OPENSSL_free(lock_count); -} -#else -void openssl_callbacks_setup(void){}; -void openssl_callbacks_cleanup(void){}; -#endif diff --git a/src/load_tests/sslinit.h b/src/load_tests/sslinit.h deleted file mode 100644 index 7a2d79faa8..0000000000 --- a/src/load_tests/sslinit.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef LT_SSLINIT_H_ -#define LT_SSLINIT_H_ - -#ifdef __cplusplus -extern "C" { -#endif -void openssl_callbacks_setup(void); -void openssl_callbacks_cleanup(void); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/load_tests/stats.cc b/src/load_tests/stats.cc deleted file mode 100644 index 86d4854e48..0000000000 --- 
a/src/load_tests/stats.cc +++ /dev/null @@ -1,32 +0,0 @@ -#include "stats.h" -#include -#include - -Histogram::Histogram() { hdr_init(1, 3L * 60 * 1000, 3, &histogram); } - -Histogram::~Histogram() { free(histogram); } -void Histogram::print() { hdr_percentiles_print(histogram, stdout, 5, 1.0, CLASSIC); } -Histogram &Histogram::operator+=(const Histogram &rhs) { - hdr_add(histogram, rhs.histogram); - return *this; -} -int64_t Histogram::totalCount() { return histogram->total_count; } - -void Statistics::recordSuccess(const std::chrono::milliseconds &duration) { successDurations.record(duration); } - -void Statistics::recordFailure(const std::chrono::milliseconds &duration) { errorDurations.record(duration); } - -void Statistics::print() { - successDurations.print(); - std::chrono::seconds elapsedTime = std::chrono::duration_cast(finishedAt - startedAt); - std::cout << "Elapsed time: " << elapsedTime.count() << "s "; - std::cout << "Rate: " << successDurations.totalCount() / elapsedTime.count() << "op/s"; -} - -Statistics &Statistics::operator+=(const Statistics &rhs) { - successDurations += rhs.successDurations; - return *this; -} - -void Statistics::start() { startedAt = clock::now(); } -void Statistics::stop() { finishedAt = clock::now(); } diff --git a/src/load_tests/stats.h b/src/load_tests/stats.h deleted file mode 100644 index bc84e62121..0000000000 --- a/src/load_tests/stats.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef LT_STATS_H -#define LT_STATS_H - -#include -#include - -class Histogram { - hdr_histogram* histogram; - - public: - Histogram(); - ~Histogram(); - void record(const std::chrono::milliseconds& duration) { hdr_record_value(histogram, duration.count()); }; - - Histogram& operator+=(const Histogram& rhs); - void print(); - int64_t totalCount(); -}; - -class Statistics { - using clock = std::chrono::steady_clock; - clock::time_point startedAt; - clock::time_point finishedAt; - Histogram successDurations; - Histogram errorDurations; - - public: - 
Statistics() : successDurations{}, errorDurations{} {} - - void recordSuccess(const std::chrono::milliseconds& duration); - void recordFailure(const std::chrono::milliseconds& durarion); - void start(); - void stop(); - void print(); - Statistics& operator+=(const Statistics& rhs); -}; - -#endif \ No newline at end of file diff --git a/src/load_tests/treehub.cc b/src/load_tests/treehub.cc deleted file mode 100644 index 19dad4bc8c..0000000000 --- a/src/load_tests/treehub.cc +++ /dev/null @@ -1,103 +0,0 @@ -#include "treehub.h" -#include -#include -#include -#include -#include -#include -#include "context.h" -#include "executor.h" - -namespace fs = boost::filesystem; - -class FetchTask { - const Config &config; - std::shared_ptr storage; - std::shared_ptr keys; - const fs::path repoDir; - const std::string branchName; - const std::string remoteUrl; - - GObjectUniquePtr repo; - - void initRepo() { - GFile *gRepoFile = g_file_new_for_path(repoDir.native().c_str()); - repo.reset(ostree_repo_new(gRepoFile)); - g_object_unref(gRepoFile); - if (!repo) { - throw std::runtime_error("Repo initialization failed"); - } - - if (!ostree_repo_create(repo.get(), OSTREE_REPO_MODE_ARCHIVE_Z2, NULL, NULL)) { - throw std::runtime_error("Unable to init repository"); - }; - - if (!OstreeManager::addRemote(repo.get(), remoteUrl, *keys)) { - throw std::runtime_error("Unable to add remote to the repository"); - }; - } - - public: - FetchTask(const Config &cfg, const fs::path rd, const std::string &bn, const std::string &rurl) - : config{cfg}, - storage{INvStorage::newStorage(config.storage)}, - keys{new KeyManager{storage, config.keymanagerConfig()}}, - repoDir{rd}, - branchName{bn}, - remoteUrl{rurl} { - keys->loadKeys(); - initRepo(); - } - - void operator()() { - GVariantBuilder builder; - GVariant *options; - const char *const refs[] = {branchName.c_str()}; - g_variant_builder_init(&builder, G_VARIANT_TYPE("a{sv}")); - g_variant_builder_add(&builder, "{s@v}", "flags", 
g_variant_new_variant(g_variant_new_int32(0))); - g_variant_builder_add(&builder, "{s@v}", "refs", g_variant_new_variant(g_variant_new_strv(refs, 1))); - options = g_variant_builder_end(&builder); - GError *error = NULL; - if (!ostree_repo_pull_with_options(repo.get(), remote, options, NULL, NULL, &error)) { - LOG_ERROR << "Failed to pull repo " << repoDir << ": " << error->message; - g_error_free(error); - error = NULL; - } - g_variant_unref(options); - } -}; - -class FetchFromOstreeTasks { - std::vector configs; - const fs::path outputDir; - const std::string &branchName; - const std::string &remoteUrl; - std::mt19937 rng; - std::uniform_int_distribution gen; - - public: - FetchFromOstreeTasks(const fs::path &baseDir, const fs::path &od, const std::string &bn, const std::string &ru) - : configs{loadDeviceConfigurations(baseDir)}, - outputDir{od}, - branchName{bn}, - remoteUrl{ru}, - gen(0UL, configs.size() - 1) { - std::random_device seedGen; - rng.seed(seedGen()); - } - - FetchTask nextTask() { - auto dstName = Utils::randomUuid(); - const fs::path repoDir = outputDir / dstName; - return FetchTask{configs[gen(rng)], repoDir, branchName, remoteUrl}; - } -}; - -void fetchFromOstree(const fs::path &baseDir, const fs::path &outputDir, const std::string &branchName, - const std::string &remoteUrl, const unsigned int rate, const unsigned int nr, - const unsigned int parallelism) { - std::vector feeds(parallelism, FetchFromOstreeTasks{baseDir, outputDir, branchName, remoteUrl}); - std::unique_ptr execController = std_::make_unique(nr); - Executor exec{feeds, rate, std::move(execController), "Fetch"}; - exec.run(); -} diff --git a/src/load_tests/treehub.h b/src/load_tests/treehub.h deleted file mode 100644 index 11fcb48035..0000000000 --- a/src/load_tests/treehub.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef LT_TREEHUB_H -#define LT_TREEHUB_H - -#include - -void fetchFromOstree(const boost::filesystem::path &baseDir, const boost::filesystem::path &outputDir, - const 
std::string &branchName, const std::string &remoteUrl, const unsigned int rate, - const unsigned int nr, const unsigned int parallelism); - -#endif // LT_TREEHUB_H \ No newline at end of file diff --git a/src/sota_tools/CMakeLists.txt b/src/sota_tools/CMakeLists.txt index 87a1ecbd49..9f90f90a6b 100644 --- a/src/sota_tools/CMakeLists.txt +++ b/src/sota_tools/CMakeLists.txt @@ -1,4 +1,3 @@ -include_directories("${PROJECT_SOURCE_DIR}/src/libaktualizr/third_party/jsoncpp") set(SOTA_TOOLS_LIB_SRC authenticate.cc check.cc @@ -16,34 +15,37 @@ set(SOTA_TOOLS_LIB_SRC server_credentials.cc treehub_server.cc) -if (BUILD_SOTA_TOOLS) - set(GARAGE_TOOLS_VERSION "${AKTUALIZR_VERSION}") - set_property(SOURCE garage_tools_version.cc PROPERTY COMPILE_DEFINITIONS GARAGE_TOOLS_VERSION="${GARAGE_TOOLS_VERSION}") - add_library(sota_tools_static_lib STATIC ${SOTA_TOOLS_LIB_SRC}) - target_include_directories(sota_tools_static_lib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SOURCE_DIR} ${GLIB2_INCLUDE_DIRS}) -endif (BUILD_SOTA_TOOLS) - - ##### garage-push targets set(GARAGE_PUSH_SRCS garage_push.cc) -set(SOTA_TOOLS_EXTERNAL_LIBS - ${Boost_SYSTEM_LIBRARIES} - ${Boost_LIBRARIES} - ${CMAKE_THREAD_LIBS_INIT} - ${LibArchive_LIBRARIES} - ${CURL_LIBRARIES} - ${OPENSSL_LIBRARIES} - ${sodium_LIBRARY_RELEASE} - ${GLIB2_LIBRARIES}) - if (BUILD_SOTA_TOOLS) - add_executable(garage-push ${GARAGE_PUSH_SRCS}) + set(GARAGE_TOOLS_VERSION "${AKTUALIZR_VERSION}") + set_property(SOURCE garage_tools_version.cc PROPERTY COMPILE_DEFINITIONS GARAGE_TOOLS_VERSION="${GARAGE_TOOLS_VERSION}") + add_library(sota_tools_lib SHARED ${SOTA_TOOLS_LIB_SRC}) + set_target_properties(sota_tools_lib PROPERTIES LIBRARY_OUTPUT_NAME sota_tools) + target_include_directories(sota_tools_lib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${PROJECT_SOURCE_DIR} ${GLIB2_INCLUDE_DIRS}) + + # we link with aktualizr static lib here, to bundle everything in sota_tools_lib + target_link_libraries(sota_tools_lib + aktualizr_static_lib + 
${Boost_SYSTEM_LIBRARIES} + ${Boost_LIBRARIES} + ${JSONCPP_LIBRARIES} + Threads::Threads + ${LibArchive_LIBRARIES} + ${CURL_LIBRARIES} + ${OPENSSL_LIBRARIES} + ${sodium_LIBRARY_RELEASE} + ${GLIB2_LIBRARIES} + ${LIBOSTREE_LIBRARIES}) + + install(TARGETS sota_tools_lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT garage_deploy) - target_link_libraries(garage-push sota_tools_static_lib aktualizr_static_lib ${SOTA_TOOLS_EXTERNAL_LIBS}) + add_executable(garage-push ${GARAGE_PUSH_SRCS}) + target_link_libraries(garage-push sota_tools_lib) - install(TARGETS garage-push RUNTIME DESTINATION bin COMPONENT garage_deploy) + install(TARGETS garage-push RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT garage_deploy) endif (BUILD_SOTA_TOOLS) @@ -54,9 +56,9 @@ set(GARAGE_CHECK_SRCS if (BUILD_SOTA_TOOLS) add_executable(garage-check ${GARAGE_CHECK_SRCS}) - target_link_libraries(garage-check sota_tools_static_lib aktualizr_static_lib ${SOTA_TOOLS_EXTERNAL_LIBS}) + target_link_libraries(garage-check sota_tools_lib) - install(TARGETS garage-check RUNTIME DESTINATION bin COMPONENT garage_deploy) + install(TARGETS garage-check RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT garage_deploy) endif (BUILD_SOTA_TOOLS) @@ -66,11 +68,11 @@ set(GARAGE_DEPLOY_SRCS if (BUILD_SOTA_TOOLS) add_executable(garage-deploy ${GARAGE_DEPLOY_SRCS}) - target_link_libraries(garage-deploy sota_tools_static_lib aktualizr_static_lib ${SOTA_TOOLS_EXTERNAL_LIBS}) + target_link_libraries(garage-deploy sota_tools_lib) add_dependencies(build_tests garage-deploy) - install(TARGETS garage-deploy RUNTIME DESTINATION bin COMPONENT garage_deploy) + install(TARGETS garage-deploy RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT garage_deploy) ### garage-sign targets @@ -87,19 +89,18 @@ if (BUILD_SOTA_TOOLS) endif() add_custom_target(garage-sign - COMMAND ${PROJECT_SOURCE_DIR}/scripts/get_garage_sign.py + COMMAND ${PROJECT_SOURCE_DIR}/scripts/get-garage-sign.py --output ${CMAKE_CURRENT_BINARY_DIR} 
${GARAGE_SIGN_ARGS}) add_dependencies(garage-deploy garage-sign) - install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/garage-sign/bin/garage-sign DESTINATION bin COMPONENT garage_deploy) - install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/garage-sign/lib DESTINATION . COMPONENT garage_deploy) + install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/garage-sign/bin/garage-sign DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT garage_deploy) + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/garage-sign/lib DESTINATION ${CMAKE_INSTALL_PREFIX} COMPONENT garage_deploy) endif (BUILD_SOTA_TOOLS) ##### For clang-format set(ALL_SOTA_TOOLS_HEADERS - accumulator.h authenticate.h check.h deploy.h @@ -132,61 +133,73 @@ endif(NOT BUILD_SOTA_TOOLS) ##### tests if (BUILD_SOTA_TOOLS) + + set(TEST_CERT_DIR ${PROJECT_BINARY_DIR}/sota_tools/certs) + set(TEST_CERTS "${TEST_CERT_DIR}/server.crt" "${TEST_CERT_DIR}/ca.crt" "${TEST_CERT_DIR}/good.zip" "${TEST_CERT_DIR}/bad.zip") + + add_custom_command(OUTPUT ${TEST_CERTS} + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/authentication/generate-certs.sh + ${TEST_CERT_DIR}) + add_custom_target(sota_tools_cert_generation - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/cert_generation/generate-zips.sh - ${PROJECT_BINARY_DIR}/sota_tools/certs) + DEPENDS ${TEST_CERTS}) + + # do not link tests with libaktualizr, but sota_tools_lib + list(REMOVE_ITEM TEST_LIBS aktualizr_lib) + list(INSERT TEST_LIBS 0 sota_tools_lib) ### common tests add_aktualizr_test(NAME sota_tools_auth_test SOURCES authenticate_test.cc - LIBRARIES sota_tools_static_lib aktualizr_static_lib PROJECT_WORKING_DIRECTORY ARGS ${PROJECT_BINARY_DIR}/sota_tools/certs) add_dependencies(t_sota_tools_auth_test sota_tools_cert_generation) add_aktualizr_test(NAME ostree_hash - LIBRARIES sota_tools_static_lib SOURCES ostree_hash_test.cc) add_aktualizr_test(NAME rate_controller - LIBRARIES sota_tools_static_lib SOURCES rate_controller_test.cc) add_aktualizr_test(NAME ostree_dir_repo SOURCES ostree_dir_repo_test.cc - 
LIBRARIES sota_tools_static_lib PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME ostree_http_repo SOURCES ostree_http_repo_test.cc - LIBRARIES sota_tools_static_lib PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME treehub_server SOURCES treehub_server_test.cc - LIBRARIES sota_tools_static_lib PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME deploy SOURCES deploy_test.cc - LIBRARIES sota_tools_static_lib PROJECT_WORKING_DIRECTORY) add_aktualizr_test(NAME ostree_object SOURCES ostree_object_test.cc - LIBRARIES sota_tools_static_lib PROJECT_WORKING_DIRECTORY) ### garage-check tests + # Check the --help option works. + add_test(NAME garage-check-option-help + COMMAND garage-check --help) + + # Report version. + add_test(NAME garage-check-option-version + COMMAND garage-check --version) + set_tests_properties(garage-check-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current garage-check version is: ${AKTUALIZR_VERSION}") + # Verify that a commit exists in a remote repo. - # Get targets.json from images repository. + # Get targets.json from Image repository. # Find specified OSTree ref in targets.json. add_test(NAME check_not_expired_targets - COMMAND ${PROJECT_SOURCE_DIR}/tests/run_expired_test.sh 2019-11-17T23:58:40Z ${CMAKE_BINARY_DIR} + COMMAND ${PROJECT_SOURCE_DIR}/tests/run_expired_test.sh 2031-11-17T23:58:40Z ${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) # Verify that a commit exists in a remote repo. - # Get targets.json from images repository. + # Get targets.json from Image repository. # Abort if targets.json has expired. add_test(NAME check_expired_targets COMMAND ${PROJECT_SOURCE_DIR}/tests/run_expired_test.sh 2017-11-17T23:58:40Z ${CMAKE_BINARY_DIR} @@ -199,81 +212,92 @@ if (BUILD_SOTA_TOOLS) add_test(NAME garage-push-option-help COMMAND garage-push --help) + # Report version. 
+ add_test(NAME garage-push-option-version + COMMAND garage-push --version) + set_tests_properties(garage-push-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current garage-push version is: ${AKTUALIZR_VERSION}") + # Abort when given bogus command line options. add_test(NAME garage-push-bad-option - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-bad-option $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-bad-option.sh $ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) # Abort when given a bogus OSTree ref. add_test(NAME garage-push-missing-ref - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-ref $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-ref.sh $ + ${PROJECT_SOURCE_DIR}/tests/test_data/credentials.zip WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) # Abort when given nonexistent credentials. add_test(NAME garage-push-missing-credentials - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-credentials $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-credentials.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) # Abort when given bogus credentials. add_test(NAME garage-push-invalid-credentials - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-invalid-credentials $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-invalid-credentials.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) # Abort when given a bogus CA certificate. add_test(NAME garage-push-cacert-not-found - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-cacert-not-found $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-cacert-not-found.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) # Recover from the server hanging on to connections. add_test(NAME garage-push-server-500 - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-500 $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-500.py $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/sota_tools) # Recover from intermittent errors. 
add_test(NAME garage-push-server-500-every-10-request - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-error_every_10 $ 500 + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-error_every_10.py $ 500 WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/sota_tools) # Recover from intermittent errors. add_test(NAME garage-push-server-409-every-10-request - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-error_every_10 $ 409 + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-error_every_10.py $ 409 WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/sota_tools) # Abort when server becomes unresponsive. add_test(NAME garage-push-server-500_after_20 - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-500_after_20 $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-server-500_after_20.py $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/sota_tools) # Abort if authorization fails. add_test(NAME garage-push-auth-plus-failure - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-auth-plus-failure $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-auth-plus-failure.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) + # Abort if a local object is corrupt + add_test(NAME garage-push-upload-corrupt-object + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-upload-corrupt-object.py $ + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/sota_tools) + if(SOTA_PACKED_CREDENTIALS) # Support dry run with auth plus using a real server. add_test(NAME garage-push-dry-run - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-dry-run $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-dry-run.sh $ ${SOTA_PACKED_CREDENTIALS} master WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) set_tests_properties(garage-push-dry-run PROPERTIES LABELS "credentials") # Parse OSTree ref or commit refhash. 
add_test(NAME garage-push-dry-run-commit - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-dry-run $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-dry-run.sh $ ${SOTA_PACKED_CREDENTIALS} 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) set_tests_properties(garage-push-dry-run-commit PROPERTIES LABELS "credentials") - # Support debug logging. - add_test(NAME garage-push-verbose-logging - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-verbose-logging $ + # Support trace logging. + add_test(NAME garage-push-trace-logging + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-trace-logging.sh $ ${SOTA_PACKED_CREDENTIALS} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) - set_tests_properties(garage-push-verbose-logging PROPERTIES LABELS "credentials") + set_tests_properties(garage-push-trace-logging PROPERTIES LABELS "credentials") if(STRACE) # Use a provided CA certificate. add_test(NAME garage-push-cacert-used - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-cacert-used $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-cacert-used.sh $ ${SOTA_PACKED_CREDENTIALS} ${STRACE} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/tests/) set_tests_properties(garage-push-cacert-used PROPERTIES LABELS "credentials") endif(STRACE) @@ -288,63 +312,68 @@ if (BUILD_SOTA_TOOLS) # Report version. add_test(NAME garage-deploy-option-version COMMAND garage-deploy --version) - set_tests_properties(garage-deploy-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current garage-deploy version is:") + set_tests_properties(garage-deploy-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current garage-deploy version is: ${AKTUALIZR_VERSION}") # Abort when given bogus command line options. 
add_test(NAME garage-deploy-bad-option - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-bad-option $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-bad-option.sh $ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) # Support debug logging. - add_test(NAME garage-deploy-debug-level - COMMAND garage-deploy -f tests/sota_tools/auth_test_good.zip -p tests/sota_tools/auth_test_good.zip --commit 123 -h 3 --name 123 -v + add_test(NAME garage-deploy-debug-logging + COMMAND garage-deploy -f tests/test_data/credentials.zip -p tests/test_data/credentials.zip --commit 123 -h 3 --name 123 -v WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) - set_tests_properties(garage-deploy-debug-level PROPERTIES PASS_REGULAR_EXPRESSION "Debug level debugging enabled") + set_tests_properties(garage-deploy-debug-logging PROPERTIES PASS_REGULAR_EXPRESSION "Debug level debugging enabled" LABELS "credentials") # Support trace logging. - add_test(NAME garage-deploy-trace-level - COMMAND garage-deploy -f tests/sota_tools/auth_test_good.zip -p tests/sota_tools/auth_test_good.zip --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -h 3 --name 123 -v -v + add_test(NAME garage-deploy-trace-logging + COMMAND garage-deploy -f tests/test_data/credentials.zip -p tests/test_data/credentials.zip --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -h 3 --name 123 --loglevel 0 WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) - set_tests_properties(garage-deploy-trace-level PROPERTIES PASS_REGULAR_EXPRESSION "Trace level debugging enabled") + set_tests_properties(garage-deploy-trace-logging PROPERTIES PASS_REGULAR_EXPRESSION "Loglevel set to 0" LABELS "credentials") # Support dry run with local repos. 
add_test(NAME garage-deploy-dry-run - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-dry-run $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-dry-run.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) set_tests_properties(garage-deploy-dry-run PROPERTIES PASS_REGULAR_EXPRESSION "Dry run. No objects uploaded.") # Abort if credentials do not support offline signing. add_test(NAME garage-deploy-online-signing - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-online-signing $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-online-signing.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) set_tests_properties(garage-deploy-online-signing PROPERTIES PASS_REGULAR_EXPRESSION "Provided push credentials are missing required components to sign Targets metadata") # Abort if destination server is unavailable. add_test(NAME garage-deploy-upload-failed - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-upload-failed $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-upload-failed.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) set_tests_properties(garage-deploy-upload-failed PROPERTIES PASS_REGULAR_EXPRESSION "Upload to treehub failed") # Abort if commit is not present in source server. add_test(NAME garage-deploy-missing-commit - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-commit $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-missing-commit.sh $ WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) # Abort when given bogus fetch credentials. add_test(NAME garage-deploy-missing-fetch-credentials - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-missing-fetch-credentials $ - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) - - # Abort when given bogus push credentials. 
- add_test(NAME garage-deploy-missing-push-credentials - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-missing-push-credentials $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-missing-fetch-credentials.sh $ + tests/test_data/credentials.zip WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) if(SOTA_PACKED_CREDENTIALS) + # Abort when given bogus push credentials. + # Garage-deploy checks that the fetch credentials are valid before looking + # at the push credentials, so this requires real SOTA_PACKED_CREDENTIALS + add_test(NAME garage-deploy-missing-push-credentials + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-missing-push-credentials.sh $ + ${SOTA_PACKED_CREDENTIALS} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) + set_tests_properties(garage-deploy-missing-push-credentials PROPERTIES LABELS "credentials") + # Use garage-sign to offline sign targets for destination repository. # Remove local tuf repo generated by garage-sign after use. add_test(NAME garage-deploy-offline-signing - COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-offline-signing $ + COMMAND ${PROJECT_SOURCE_DIR}/tests/sota_tools/test-garage-deploy-offline-signing.sh $ ${SOTA_PACKED_CREDENTIALS} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) set_tests_properties(garage-deploy-offline-signing PROPERTIES PASS_REGULAR_EXPRESSION "Success" LABELS "credentials") endif(SOTA_PACKED_CREDENTIALS) diff --git a/src/sota_tools/accumulator.h b/src/sota_tools/accumulator.h deleted file mode 100644 index 694b2222dd..0000000000 --- a/src/sota_tools/accumulator.h +++ /dev/null @@ -1,104 +0,0 @@ -// accumulator.hpp header file -// -// (C) Copyright benjaminwolsey.de 2010-2011. Distributed under the Boost -// Software License, Version 1.0. 
(See accompanying file -// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -#ifndef PROGRAM_OPTIONS_ACCUMULATOR_HPP -#define PROGRAM_OPTIONS_ACCUMULATOR_HPP - -#include -#include -#include - -#include -#include - -/// An accumulating option value to handle multiple incrementing options. -template -class accumulator_type : public boost::program_options::value_semantic { - public: - explicit accumulator_type(T* store) : _store(store), _interval(1), _default(0) {} - - /// Set the notifier function. - accumulator_type* notifier(std::function f) { - _notifier = f; - return this; - } - - /// Set the default value for this option. - accumulator_type* default_value(const T& t) { - _default = t; - return this; - } - - /// Set the implicit value for this option. - // - /// Unlike for program_options::value, this specifies a value - /// to be applied on each occurrence of the option. - accumulator_type* implicit_value(const T& t) { - _interval = t; - return this; - } - - virtual std::string name() const { return std::string(); } // NOLINT - - /// There are no tokens for an accumulator_type - virtual unsigned min_tokens() const { return 0; } // NOLINT - virtual unsigned max_tokens() const { return 0; } // NOLINT - - virtual bool adjacent_tokens_only() const { return false; } // NOLINT - - /// Accumulating from different sources is silly. - virtual bool is_composing() const { return false; } // NOLINT - - /// Requiring one or more appearances is unlikely. - virtual bool is_required() const { return false; } // NOLINT - - /// Every appearance of the option simply increments the value - // - /// There should never be any tokens. - virtual void parse(boost::any& value_store, const std::vector&, bool /*utf8*/) const { // NOLINT - if (value_store.empty()) { - value_store = T(); - } - boost::any_cast(value_store) += _interval; - } - - /// If the option doesn't appear, this is the default value. 
- virtual bool apply_default(boost::any& value_store) const { // NOLINT - value_store = _default; - return true; - } - - /// Notify the user function with the value of the value store. - virtual void notify(const boost::any& value_store) const { // NOLINT - const auto* val = boost::any_cast(&value_store); - if (_store) { - *_store = *val; - } - if (_notifier) { - _notifier(*val); - } - } - - virtual ~accumulator_type() {} // NOLINT - - private: - T* _store; - std::function _notifier; - T _interval; - T _default; -}; - -template -accumulator_type* accumulator() { - return new accumulator_type(0); -} - -template -accumulator_type* accumulator(T* store) { - return new accumulator_type(store); -} - -#endif diff --git a/src/sota_tools/authenticate.cc b/src/sota_tools/authenticate.cc index 5fee75ed84..19ed2a8007 100644 --- a/src/sota_tools/authenticate.cc +++ b/src/sota_tools/authenticate.cc @@ -12,7 +12,7 @@ int authenticate(const string &cacerts, const ServerCredentials &creds, TreehubS break; } case AuthMethod::kOauth2: { - OAuth2 oauth2(creds.GetAuthServer(), creds.GetClientId(), creds.GetClientSecret(), cacerts); + OAuth2 oauth2(creds.GetAuthServer(), creds.GetClientId(), creds.GetClientSecret(), creds.GetScope(), cacerts); if (!creds.GetClientId().empty()) { if (oauth2.Authenticate() != AuthenticationResult::kSuccess) { diff --git a/src/sota_tools/authenticate_test.cc b/src/sota_tools/authenticate_test.cc index 8dca882dad..d0f4353e49 100644 --- a/src/sota_tools/authenticate_test.cc +++ b/src/sota_tools/authenticate_test.cc @@ -13,27 +13,16 @@ boost::filesystem::path certs_dir; -/* Authenticate with OAuth2. - * Parse authentication information from treehub.json. */ -TEST(authenticate, good_zip) { - // Authenticates with the ATS portal to the SaaS instance. 
- boost::filesystem::path filepath = "tests/sota_tools/auth_test_good.zip"; - ServerCredentials creds(filepath); - EXPECT_EQ(creds.GetMethod(), AuthMethod::kOauth2); - TreehubServer treehub; - int r = authenticate("", creds, treehub); - EXPECT_EQ(0, r); -} - /* Authenticate with TLS credentials. - * Parse images repository URL from a provided archive. */ + * Parse Image repository URL from a provided archive. */ TEST(authenticate, good_cert_zip) { // Authenticates with tls_server on port 1443. boost::filesystem::path filepath = certs_dir / "good.zip"; + boost::filesystem::path capath = certs_dir / "server.crt"; ServerCredentials creds(filepath); EXPECT_EQ(creds.GetMethod(), AuthMethod::kTls); TreehubServer treehub; - int r = authenticate("tests/fake_http_server/server.crt", creds, treehub); + int r = authenticate(capath.string(), creds, treehub); EXPECT_EQ(0, r); CurlEasyWrapper curl_handle; curlEasySetoptWrapper(curl_handle.get(), CURLOPT_VERBOSE, 1); @@ -44,14 +33,15 @@ TEST(authenticate, good_cert_zip) { /* Authenticate with nothing (no auth). * Parse authentication information from treehub.json. - * Parse images repository URL from a provided archive. */ + * Parse Image repository URL from a provided archive. */ TEST(authenticate, good_cert_noauth_zip) { // Authenticates with tls_noauth_server on port 2443. 
boost::filesystem::path filepath = "tests/sota_tools/auth_test_noauth_good.zip"; + boost::filesystem::path capath = certs_dir / "server.crt"; ServerCredentials creds(filepath); EXPECT_EQ(creds.GetMethod(), AuthMethod::kNone); TreehubServer treehub; - int r = authenticate("tests/fake_http_server/server.crt", creds, treehub); + int r = authenticate(capath.string(), creds, treehub); EXPECT_EQ(0, r); CurlEasyWrapper curl_handle; curlEasySetoptWrapper(curl_handle.get(), CURLOPT_VERBOSE, 1); @@ -93,15 +83,6 @@ TEST(authenticate, no_json_zip) { EXPECT_THROW(ServerCredentials creds(filepath), BadCredentialsContent); } -/* Extract credentials from a provided JSON file. */ -TEST(authenticate, good_json) { - // Authenticates with the ATS portal to the SaaS instance. - boost::filesystem::path filepath = "tests/sota_tools/auth_test_good.json"; - TreehubServer treehub; - int r = authenticate("", ServerCredentials(filepath), treehub); - EXPECT_EQ(0, r); -} - /* Reject a bogus provided JSON file. */ TEST(authenticate, bad_json) { boost::filesystem::path filepath = "tests/sota_tools/auth_test_bad.json"; @@ -142,8 +123,9 @@ int main(int argc, char **argv) { } certs_dir = argv[1]; - boost::process::child server_process("tests/fake_http_server/tls_server.py"); - boost::process::child server_noauth_process("tests/fake_http_server/tls_noauth_server.py"); + boost::process::child server_process("tests/sota_tools/authentication/tls_server.py", "1443", certs_dir); + boost::process::child server_noauth_process("tests/sota_tools/authentication/tls_server.py", "--noauth", "2443", + certs_dir); // TODO: this do not work because the server expects auth! Let's sleep for now. 
// (could be replaced by a check with raw tcp) // TestUtils::waitForServer("https://localhost:1443/"); diff --git a/src/sota_tools/check.cc b/src/sota_tools/check.cc index 7c75158995..9d161bca59 100644 --- a/src/sota_tools/check.cc +++ b/src/sota_tools/check.cc @@ -4,13 +4,13 @@ #include "check.h" #include "garage_common.h" #include "json/json.h" +#include "libaktualizr/types.h" #include "logging/logging.h" #include "ostree_http_repo.h" #include "ostree_object.h" #include "rate_controller.h" #include "request_pool.h" #include "treehub_server.h" -#include "utilities/types.h" #include "utilities/utils.h" // helper function to download data to a string @@ -23,7 +23,8 @@ static size_t writeString(void *contents, size_t size, size_t nmemb, void *userp return size * nmemb; } -int CheckRefValid(TreehubServer &treehub, const std::string &ref, RunMode mode, int max_curl_requests) { +int CheckRefValid(TreehubServer &treehub, const std::string &ref, RunMode mode, int max_curl_requests, + const boost::filesystem::path &tree_dir) { // Check if the ref is present on treehub. The traditional use case is that it // should be a commit object, but we allow walking the tree given any OSTree // ref. @@ -51,7 +52,7 @@ int CheckRefValid(TreehubServer &treehub, const std::string &ref, RunMode mode, LOG_FATAL << "OSTree commit " << ref << " is missing in treehub"; return EXIT_FAILURE; } else { - type = OstreeObjectType::OSTREE_OBJECT_TYPE_UNKNOWN; + type = OSTREE_OBJECT_TYPE_UNKNOWN; } } else if (http_code != 200) { LOG_FATAL << "Error " << http_code << " getting OSTree ref " << ref << " from treehub"; @@ -63,11 +64,11 @@ int CheckRefValid(TreehubServer &treehub, const std::string &ref, RunMode mode, if (mode == RunMode::kWalkTree) { // Walk the entire tree and check for all objects. 
- OSTreeHttpRepo dest_repo(&treehub); + OSTreeHttpRepo dest_repo(&treehub, tree_dir); OSTreeHash hash = OSTreeHash::Parse(ref); OSTreeObject::ptr input_object = dest_repo.GetObject(hash, type); - RequestPool request_pool(treehub, max_curl_requests, mode); + RequestPool request_pool(treehub, max_curl_requests, mode, false); // Add input object to the queue. request_pool.AddQuery(input_object); @@ -120,7 +121,7 @@ int CheckRefValid(TreehubServer &treehub, const std::string &ref, RunMode mode, } Json::Value target_list = targets_json["signed"]["targets"]; - for (Json::ValueIterator t_it = target_list.begin(); t_it != target_list.end(); t_it++) { + for (auto t_it = target_list.begin(); t_it != target_list.end(); t_it++) { if ((*t_it)["hashes"]["sha256"].asString() == ref) { LOG_INFO << "OSTree commit " << ref << " is found in targets.json"; return EXIT_SUCCESS; diff --git a/src/sota_tools/check.h b/src/sota_tools/check.h index 37b750f979..3b0f26e2b7 100644 --- a/src/sota_tools/check.h +++ b/src/sota_tools/check.h @@ -9,8 +9,9 @@ #include "server_credentials.h" /** - *Check if the ref is present on the server and in targets.json + * Check if the ref is present on the server and in targets.json */ -int CheckRefValid(TreehubServer& treehub, const std::string& ref, RunMode mode, int max_curl_requests); +int CheckRefValid(TreehubServer& treehub, const std::string& ref, RunMode mode, int max_curl_requests, + const boost::filesystem::path& tree_dir = ""); #endif diff --git a/src/sota_tools/deploy.cc b/src/sota_tools/deploy.cc index 9277777475..40a5baa731 100644 --- a/src/sota_tools/deploy.cc +++ b/src/sota_tools/deploy.cc @@ -24,7 +24,7 @@ bool CheckPoolState(const OSTreeObject::ptr &root_object, const RequestPool &req } bool UploadToTreehub(const OSTreeRepo::ptr &src_repo, TreehubServer &push_server, const OSTreeHash &ostree_commit, - const RunMode mode, const int max_curl_requests) { + const RunMode mode, const int max_curl_requests, const bool fsck_on_upload) { 
assert(max_curl_requests > 0); OSTreeObject::ptr root_object; @@ -35,7 +35,7 @@ bool UploadToTreehub(const OSTreeRepo::ptr &src_repo, TreehubServer &push_server return false; } - RequestPool request_pool(push_server, max_curl_requests, mode); + RequestPool request_pool(push_server, max_curl_requests, mode, fsck_on_upload); // Add commit object to the queue. request_pool.AddQuery(root_object); @@ -50,7 +50,9 @@ bool UploadToTreehub(const OSTreeRepo::ptr &src_repo, TreehubServer &push_server if (root_object->is_on_server() == PresenceOnServer::kObjectPresent) { if (mode == RunMode::kDefault || mode == RunMode::kPushTree) { - LOG_INFO << "Upload to Treehub complete after " << request_pool.total_requests_made() << " requests"; + LOG_INFO << "Upload to Treehub complete after " << request_pool.head_requests_made() << " HEAD requests and " + << request_pool.put_requests_made() << " PUT requests."; + LOG_INFO << "Total size of uploaded objects: " << request_pool.total_object_size() << " bytes."; } else { LOG_INFO << "Dry run. No objects uploaded."; } @@ -105,36 +107,20 @@ bool OfflineSignRepo(const ServerCredentials &push_credentials, const std::strin return true; } -bool PushRootRef(const ServerCredentials &push_credentials, const OSTreeRef &ref, const std::string &cacerts, - const RunMode mode) { - if (push_credentials.CanSignOffline()) { - // In general, this is the wrong thing. 
We should be using offline signing - // if private key material is present in credentials.zip - LOG_WARNING << "Pushing by refname despite that credentials.zip can be used to sign offline."; - } - - TreehubServer push_server; - - if (authenticate(cacerts, push_credentials, push_server) != EXIT_SUCCESS) { - LOG_FATAL << "Authentication failed"; +bool PushRootRef(const TreehubServer &push_server, const OSTreeRef &ref) { + CurlEasyWrapper easy_handle; + curlEasySetoptWrapper(easy_handle.get(), CURLOPT_VERBOSE, get_curlopt_verbose()); + ref.PushRef(push_server, easy_handle.get()); + CURLcode err = curl_easy_perform(easy_handle.get()); + if (err != 0U) { + LOG_ERROR << "Error pushing root ref: " << curl_easy_strerror(err); return false; } - - if (mode == RunMode::kDefault || mode == RunMode::kPushTree) { - CurlEasyWrapper easy_handle; - curlEasySetoptWrapper(easy_handle.get(), CURLOPT_VERBOSE, get_curlopt_verbose()); - ref.PushRef(push_server, easy_handle.get()); - CURLcode err = curl_easy_perform(easy_handle.get()); - if (err != 0u) { - LOG_ERROR << "Error pushing root ref: " << curl_easy_strerror(err); - return false; - } - long rescode; // NOLINT(google-runtime-int) - curl_easy_getinfo(easy_handle.get(), CURLINFO_RESPONSE_CODE, &rescode); - if (rescode != 200) { - LOG_ERROR << "Error pushing root ref, got " << rescode << " HTTP response"; - return false; - } + long rescode; // NOLINT(google-runtime-int) + curl_easy_getinfo(easy_handle.get(), CURLINFO_RESPONSE_CODE, &rescode); + if (rescode < 200 || rescode >= 400) { + LOG_ERROR << "Error pushing root ref, got " << rescode << " HTTP response"; + return false; } return true; diff --git a/src/sota_tools/deploy.h b/src/sota_tools/deploy.h index f257122304..213cfeff22 100644 --- a/src/sota_tools/deploy.h +++ b/src/sota_tools/deploy.h @@ -25,21 +25,21 @@ bool CheckPoolState(const OSTreeObject::ptr& root_object, const RequestPool& req * \param ostree_commit * \param mode * \param max_curl_requests + * \param fsck_on_upload 
Validate objects on disk before uploading them */ bool UploadToTreehub(const OSTreeRepo::ptr& src_repo, TreehubServer& push_server, const OSTreeHash& ostree_commit, - RunMode mode, int max_curl_requests); + RunMode mode, int max_curl_requests, bool fsck_on_upload); /** - * Use the garage-sign tool and the images targets.json keys in credentials.zip - * to add an entry to images/targets.json + * Use the garage-sign tool and the Image repo targets.json keys in credentials.zip + * to add an entry to the Image repo's targets.json */ bool OfflineSignRepo(const ServerCredentials& push_credentials, const std::string& name, const OSTreeHash& hash, const std::string& hardwareids); /** - * Update images/targets.json by pushing the OSTree commit hash to /refs/heads/qemux86-64 + * Update the ref on Treehub to the new commit. */ -bool PushRootRef(const ServerCredentials& push_credentials, const OSTreeRef& ref, const std::string& cacerts, - RunMode mode); +bool PushRootRef(const TreehubServer& push_server, const OSTreeRef& ref); #endif diff --git a/src/sota_tools/deploy_test.cc b/src/sota_tools/deploy_test.cc index fa0d405780..6d411a4882 100644 --- a/src/sota_tools/deploy_test.cc +++ b/src/sota_tools/deploy_test.cc @@ -29,13 +29,13 @@ TEST(deploy, UploadToTreehub) { 0xf9, 0x9d, 0xf2, 0x5c, 0x3c, 0x3f, 0x25, 0x8d, 0xcc, 0xbe}; TreehubServer push_server; EXPECT_EQ(authenticate(cert_path.string(), server_creds, push_server), EXIT_SUCCESS); - UploadToTreehub(src_repo, push_server, OSTreeHash(hash), run_mode, 2); + UploadToTreehub(src_repo, push_server, OSTreeHash(hash), run_mode, 2, true); int result = system( (std::string("diff -r ") + (temp_dir.Path() / "objects/").string() + " tests/sota_tools/repo/objects/").c_str()); EXPECT_EQ(result, 0) << "Diff between the source repo objects and the destination repo objects is nonzero."; - bool push_root_ref_res = PushRootRef(server_creds, test_ref, cert_path.string(), run_mode); + bool push_root_ref_res = PushRootRef(push_server, test_ref); 
EXPECT_TRUE(push_root_ref_res); result = diff --git a/src/sota_tools/garage_check.cc b/src/sota_tools/garage_check.cc index dfdb0da0ed..c5ab5c0cb7 100644 --- a/src/sota_tools/garage_check.cc +++ b/src/sota_tools/garage_check.cc @@ -3,19 +3,17 @@ #include #include #include -#include "json/json.h" -#include "accumulator.h" #include "authenticate.h" #include "check.h" #include "garage_common.h" #include "garage_tools_version.h" +#include "libaktualizr/types.h" #include "logging/logging.h" #include "ostree_http_repo.h" #include "ostree_object.h" #include "request_pool.h" #include "treehub_server.h" -#include "utilities/types.h" #include "utilities/utils.h" namespace po = boost::program_options; @@ -23,24 +21,26 @@ namespace po = boost::program_options; int main(int argc, char **argv) { logger_init(); - int verbosity; std::string ref; boost::filesystem::path credentials_path; std::string cacerts; int max_curl_requests; RunMode mode = RunMode::kDefault; + boost::filesystem::path tree_dir; po::options_description desc("garage-check command line options"); // clang-format off desc.add_options() ("help", "print usage") ("version", "Current garage-check version") - ("verbose,v", accumulator(&verbosity), "Verbose logging (use twice for more information)") - ("quiet,q", "Quiet mode") + ("verbose,v", "Verbose logging (loglevel 1)") + ("quiet,q", "Quiet mode (loglevel 3)") + ("loglevel", po::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("ref,r", po::value(&ref)->required(), "refhash to check") ("credentials,j", po::value(&credentials_path)->required(), "credentials (json or zip containing json)") ("cacert", po::value(&cacerts), "override path to CA root certificates, in the same format as curl --cacert") ("jobs", po::value(&max_curl_requests)->default_value(30), "maximum number of parallel requests (only relevant with --walk-tree)") - ("walk-tree,w", "walk entire tree and check presence of all objects"); + ("walk-tree,w", "walk entire tree and check 
presence of all objects") + ("tree-dir,t", po::value(&tree_dir), "directory to which to write the tree (only used with --walk-tree)"); // clang-format on po::variables_map vm; @@ -48,7 +48,7 @@ int main(int argc, char **argv) { try { po::store(po::parse_command_line(argc, reinterpret_cast(argv), desc), vm); - if (vm.count("help") != 0u) { + if (vm.count("help") != 0U) { LOG_INFO << desc; return EXIT_SUCCESS; } @@ -64,27 +64,23 @@ int main(int argc, char **argv) { } try { - // Configure logging - if (verbosity == 0) { - // 'verbose' trumps 'quiet' - if (static_cast(vm.count("quiet")) != 0) { - logger_set_threshold(boost::log::trivial::warning); - } else { - logger_set_threshold(boost::log::trivial::info); - } - } else if (verbosity == 1) { + // Configure logging. Try loglevel first, then verbose, then quiet. + if (vm.count("loglevel") != 0) { + const int loglevel = vm["loglevel"].as(); + logger_set_threshold(static_cast(loglevel)); + LOG_INFO << "Loglevel set to " << loglevel; + } else if (static_cast(vm.count("verbose")) != 0) { logger_set_threshold(boost::log::trivial::debug); LOG_DEBUG << "Debug level debugging enabled"; - } else if (verbosity > 1) { - logger_set_threshold(boost::log::trivial::trace); - LOG_TRACE << "Trace level debugging enabled"; + } else if (static_cast(vm.count("quiet")) != 0) { + logger_set_threshold(boost::log::trivial::warning); } else { - assert(0); + logger_set_threshold(boost::log::trivial::info); } Utils::setUserAgent(std::string("garage-check/") + garage_tools_version()); - if (vm.count("walk-tree") != 0u) { + if (vm.count("walk-tree") != 0U) { mode = RunMode::kWalkTree; } @@ -99,7 +95,7 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } - if (CheckRefValid(treehub, ref, mode, max_curl_requests) != EXIT_SUCCESS) { + if (CheckRefValid(treehub, ref, mode, max_curl_requests, tree_dir) != EXIT_SUCCESS) { LOG_FATAL << "Check if the ref is present on the server or in targets.json failed"; return EXIT_FAILURE; } diff --git 
a/src/sota_tools/garage_common.h b/src/sota_tools/garage_common.h index a02f8b21b4..fbe882bd3d 100644 --- a/src/sota_tools/garage_common.h +++ b/src/sota_tools/garage_common.h @@ -1,5 +1,8 @@ #ifndef GARAGE_COMMON_H_ #define GARAGE_COMMON_H_ + +#include "ostree-core.h" + /** \file */ /** Execution mode to run garage tools in. */ @@ -16,30 +19,17 @@ enum class RunMode { kPushTree, }; -/** Types of OSTree objects, borrowed from libostree/ostree-core.h. - * Copied here to avoid a dependency. We do not currently handle types 5-7, and - * UNKNOWN is our own invention for pseudo-backwards compatibility. +/* sota_tools was originally designed to not depend on OSTree. This was because + * libostree wasn't widely available in package managers so we depended on just + * glib. This header file used to contain a copy of the definition of + * OstreeObjectType from libostree/ostree-core.h, with an added entry for + * OSTREE_OBJECT_TYPE_UNKNOWN at position 0 (which wasn't defined at all in + * ostree-core.h). * - * OSTREE_OBJECT_TYPE_FILE: Content; regular file, symbolic link - * OSTREE_OBJECT_TYPE_DIR_TREE: List of children (trees or files), and metadata - * OSTREE_OBJECT_TYPE_DIR_META: Directory metadata - * OSTREE_OBJECT_TYPE_COMMIT: Toplevel object, refers to tree and dirmeta for root - * OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT: Toplevel object, refers to a deleted commit - * OSTREE_OBJECT_TYPE_COMMIT_META: Detached metadata for a commit - * OSTREE_OBJECT_TYPE_PAYLOAD_LINK: Symlink to a .file given its checksum on the payload only. - * - * Enumeration for core object types; %OSTREE_OBJECT_TYPE_FILE is for - * content, the other types are metadata. + * We now have a dependency on libostree, so this duplication both isn't needed + * any more, and breaks compilation (because of duplicate definitions). We + * still need OSTREE_OBJECT_TYPE_UNKNOWN in a few places, so define it here. 
*/ -enum class OstreeObjectType { - OSTREE_OBJECT_TYPE_UNKNOWN = 0, - OSTREE_OBJECT_TYPE_FILE = 1, /* .file */ - OSTREE_OBJECT_TYPE_DIR_TREE = 2, /* .dirtree */ - OSTREE_OBJECT_TYPE_DIR_META = 3, /* .dirmeta */ - OSTREE_OBJECT_TYPE_COMMIT = 4, /* .commit */ - OSTREE_OBJECT_TYPE_TOMBSTONE_COMMIT = 5, /* .commit-tombstone */ - OSTREE_OBJECT_TYPE_COMMIT_META = 6, /* .commitmeta */ - OSTREE_OBJECT_TYPE_PAYLOAD_LINK = 7, /* .payload-link */ -}; +const OstreeObjectType OSTREE_OBJECT_TYPE_UNKNOWN = static_cast(0); #endif // GARAGE_COMMON_H_ diff --git a/src/sota_tools/garage_deploy.cc b/src/sota_tools/garage_deploy.cc index daf083c491..526615e2ce 100644 --- a/src/sota_tools/garage_deploy.cc +++ b/src/sota_tools/garage_deploy.cc @@ -1,9 +1,9 @@ +#include #include #include #include -#include "accumulator.h" #include "authenticate.h" #include "check.h" #include "deploy.h" @@ -17,7 +17,8 @@ namespace po = boost::program_options; int main(int argc, char **argv) { logger_init(); - int verbosity; + auto start_time = std::chrono::system_clock::now(); + std::string ostree_commit; std::string name; boost::filesystem::path fetch_cred; @@ -31,8 +32,9 @@ int main(int argc, char **argv) { desc.add_options() ("help", "print usage") ("version", "Current garage-deploy version") - ("verbose,v", accumulator(&verbosity), "Verbose logging (use twice for more information)") - ("quiet,q", "Quiet mode") + ("verbose,v", "Verbose logging (loglevel 1)") + ("quiet,q", "Quiet mode (loglevel 3)") + ("loglevel", po::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("commit", po::value(&ostree_commit)->required(), "OSTree commit to deploy") ("name", po::value(&name)->required(), "Name of image") ("fetch-credentials,f", po::value(&fetch_cred)->required(), "path to source credentials") @@ -40,7 +42,8 @@ int main(int argc, char **argv) { ("hardwareids,h", po::value(&hardwareids)->required(), "list of hardware ids") ("cacert", po::value(&cacerts), "override path to CA root 
certificates, in the same format as curl --cacert") ("jobs", po::value(&max_curl_requests)->default_value(30), "maximum number of parallel requests") - ("dry-run,n", "check arguments and authenticate but don't upload"); + ("dry-run,n", "check arguments and authenticate but don't upload") + ("disable-integrity-checks", "Don't validate the checksums of objects before uploading them"); // clang-format on po::variables_map vm; @@ -48,7 +51,7 @@ int main(int argc, char **argv) { try { po::store(po::parse_command_line(argc, reinterpret_cast(argv), desc), vm); - if (vm.count("help") != 0u) { + if (vm.count("help") != 0U) { LOG_INFO << desc; return EXIT_SUCCESS; } @@ -63,27 +66,28 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } - // Configure logging - if (verbosity == 0) { - // 'verbose' trumps 'quiet' - if (static_cast(vm.count("quiet")) != 0) { + // Configure logging. Try loglevel first, then verbose, then quiet. + try { + if (vm.count("loglevel") != 0) { + const int loglevel = vm["loglevel"].as(); + logger_set_threshold(static_cast(loglevel)); + LOG_INFO << "Loglevel set to " << loglevel; + } else if (static_cast(vm.count("verbose")) != 0) { + logger_set_threshold(boost::log::trivial::debug); + LOG_DEBUG << "Debug level debugging enabled"; + } else if (static_cast(vm.count("quiet")) != 0) { logger_set_threshold(boost::log::trivial::warning); } else { logger_set_threshold(boost::log::trivial::info); } - } else if (verbosity == 1) { - logger_set_threshold(boost::log::trivial::debug); - LOG_DEBUG << "Debug level debugging enabled"; - } else if (verbosity > 1) { - logger_set_threshold(boost::log::trivial::trace); - LOG_TRACE << "Trace level debugging enabled"; - } else { - assert(0); + } catch (std::exception &e) { + LOG_FATAL << e.what(); + return EXIT_FAILURE; } Utils::setUserAgent(std::string("garage-deploy/") + garage_tools_version()); - if (vm.count("dry-run") != 0u) { + if (vm.count("dry-run") != 0U) { mode = RunMode::kDryRun; } @@ -109,10 +113,11 @@ int 
main(int argc, char **argv) { OSTreeRepo::ptr src_repo = std::make_shared(&fetch_server); try { OSTreeHash commit(OSTreeHash::Parse(ostree_commit)); + bool fsck = vm.count("disable-integrity-checks") == 0; // Since the fetches happen on a single thread in OSTreeHttpRepo, there // isn't much reason to upload in parallel, but why hold the system back if // the fetching is faster than the uploading? - if (!UploadToTreehub(src_repo, push_server, commit, mode, max_curl_requests)) { + if (!UploadToTreehub(src_repo, push_server, commit, mode, max_curl_requests, fsck)) { LOG_FATAL << "Upload to treehub failed"; return EXIT_FAILURE; } @@ -138,6 +143,10 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } + auto end_time = std::chrono::system_clock::now(); + std::chrono::duration diff_time = end_time - start_time; + LOG_INFO << "Total runtime: " << diff_time.count() << " seconds."; + return EXIT_SUCCESS; } // vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/sota_tools/garage_push.cc b/src/sota_tools/garage_push.cc index 73aad97014..e168a9b3d8 100644 --- a/src/sota_tools/garage_push.cc +++ b/src/sota_tools/garage_push.cc @@ -3,7 +3,6 @@ #include #include -#include "accumulator.h" #include "authenticate.h" #include "deploy.h" #include "garage_common.h" @@ -11,17 +10,18 @@ #include "logging/logging.h" #include "ostree_dir_repo.h" #include "ostree_repo.h" +#include "utilities/xml2json.h" namespace po = boost::program_options; int main(int argc, char **argv) { logger_init(); - int verbosity; boost::filesystem::path repo_path; std::string ref; boost::filesystem::path credentials_path; std::string cacerts; + boost::filesystem::path manifest_path; int max_curl_requests; RunMode mode = RunMode::kDefault; po::options_description desc("garage-push command line options"); @@ -29,15 +29,18 @@ int main(int argc, char **argv) { desc.add_options() ("help", "print usage") ("version", "Current garage-push version") - ("verbose,v", accumulator(&verbosity), "Verbose logging 
(use twice for more information)") - ("quiet,q", "Quiet mode") + ("verbose,v", "Verbose logging (loglevel 1)") + ("quiet,q", "Quiet mode (loglevel 3)") + ("loglevel", po::value(), "set log level 0-5 (trace, debug, info, warning, error, fatal)") ("repo,C", po::value(&repo_path)->required(), "location of OSTree repo") ("ref,r", po::value(&ref)->required(), "OSTree ref to push (or commit refhash)") ("credentials,j", po::value(&credentials_path)->required(), "credentials (json or zip containing json)") ("cacert", po::value(&cacerts), "override path to CA root certificates, in the same format as curl --cacert") + ("repo-manifest", po::value(&manifest_path), "manifest describing repository branches used in the image, to be sent as attached metadata") ("jobs", po::value(&max_curl_requests)->default_value(30), "maximum number of parallel requests") ("dry-run,n", "check arguments and authenticate but don't upload") - ("walk-tree,w", "walk entire tree and upload all missing objects"); + ("walk-tree,w", "walk entire tree and upload all missing objects") + ("disable-integrity-checks", "Don't validate the checksums of objects before uploading them"); // clang-format on po::variables_map vm; @@ -45,7 +48,7 @@ int main(int argc, char **argv) { try { po::store(po::parse_command_line(argc, reinterpret_cast(argv), desc), vm); - if (vm.count("help") != 0u) { + if (vm.count("help") != 0U) { LOG_INFO << desc; return EXIT_SUCCESS; } @@ -60,37 +63,38 @@ int main(int argc, char **argv) { return EXIT_FAILURE; } - // Configure logging - if (verbosity == 0) { - // 'verbose' trumps 'quiet' - if (static_cast(vm.count("quiet")) != 0) { + // Configure logging. Try loglevel first, then verbose, then quiet. 
+ try { + if (vm.count("loglevel") != 0) { + const int loglevel = vm["loglevel"].as(); + logger_set_threshold(static_cast(loglevel)); + LOG_INFO << "Loglevel set to " << loglevel; + } else if (static_cast(vm.count("verbose")) != 0) { + logger_set_threshold(boost::log::trivial::debug); + LOG_DEBUG << "Debug level debugging enabled"; + } else if (static_cast(vm.count("quiet")) != 0) { logger_set_threshold(boost::log::trivial::warning); } else { logger_set_threshold(boost::log::trivial::info); } - } else if (verbosity == 1) { - logger_set_threshold(boost::log::trivial::debug); - LOG_DEBUG << "Debug level debugging enabled"; - } else if (verbosity > 1) { - logger_set_threshold(boost::log::trivial::trace); - LOG_TRACE << "Trace level debugging enabled"; - } else { - assert(0); + } catch (std::exception &e) { + LOG_FATAL << e.what(); + return EXIT_FAILURE; } Utils::setUserAgent(std::string("garage-push/") + garage_tools_version()); - if (cacerts != "") { + if (!cacerts.empty()) { if (!boost::filesystem::exists(cacerts)) { LOG_FATAL << "--cacert path " << cacerts << " does not exist"; return EXIT_FAILURE; } } - if (vm.count("dry-run") != 0u) { + if (vm.count("dry-run") != 0U) { mode = RunMode::kDryRun; } - if (vm.count("walk-tree") != 0u) { + if (vm.count("walk-tree") != 0U) { // If --walk-tree and --dry-run were provided, walk but do not push. if (mode == RunMode::kDryRun) { mode = RunMode::kWalkTree; @@ -132,19 +136,49 @@ int main(int argc, char **argv) { LOG_FATAL << "Authentication with push server failed"; return EXIT_FAILURE; } - if (!UploadToTreehub(src_repo, push_server, *commit, mode, max_curl_requests)) { + bool fsck = vm.count("disable-integrity-checks") == 0; + if (!UploadToTreehub(src_repo, push_server, *commit, mode, max_curl_requests, fsck)) { LOG_FATAL << "Upload to treehub failed"; return EXIT_FAILURE; } - if (push_credentials.CanSignOffline()) { - LOG_INFO << "Credentials contain offline signing keys. 
Use garage-sign to push root ref"; - } else if (!is_ref) { - LOG_INFO << "Provided ref " << ref << " is a commit refhash. Cannot push root ref"; - } else { - if (!PushRootRef(push_credentials, ostree_ref, cacerts, mode)) { - LOG_FATAL << "Error pushing root ref to treehub"; - return EXIT_FAILURE; + if (mode != RunMode::kDryRun) { + if (is_ref) { + if (!PushRootRef(push_server, ostree_ref)) { + LOG_FATAL << "Error pushing root ref to treehub"; + return EXIT_FAILURE; + } + } else { + LOG_INFO << "Provided ref " << ref << " is a commit refhash. Cannot push root ref"; + } + } + + if (!manifest_path.empty()) { + try { + std::string manifest_json_str; + std::ifstream ifs(manifest_path.string()); + std::stringstream ss; + auto manifest_json = xml2json::xml2json(ifs); + ss << manifest_json; + manifest_json_str = ss.str(); + + LOG_INFO << "Sending manifest:\n" << manifest_json_str; + if (mode != RunMode::kDryRun) { + CURL *curl = curl_easy_init(); + curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); + push_server.SetContentType("Content-Type: application/json"); + push_server.InjectIntoCurl("manifests/" + commit->string(), curl); + curlEasySetoptWrapper(curl, CURLOPT_CUSTOMREQUEST, "PUT"); + curlEasySetoptWrapper(curl, CURLOPT_POSTFIELDS, manifest_json_str.c_str()); + CURLcode rc = curl_easy_perform(curl); + + if (rc != CURLE_OK) { + LOG_ERROR << "Error pushing repo manifest to Treehub"; + } + curl_easy_cleanup(curl); + } + } catch (std::exception &e) { + LOG_ERROR << "Could not send repo manifest to Treehub"; } } } catch (const BadCredentialsArchive &e) { diff --git a/src/sota_tools/oauth2.cc b/src/sota_tools/oauth2.cc index 811293ea53..d89daa6716 100644 --- a/src/sota_tools/oauth2.cc +++ b/src/sota_tools/oauth2.cc @@ -1,5 +1,3 @@ -#include - #include #include @@ -26,9 +24,30 @@ size_t curl_handle_write_sstream(void *buffer, size_t size, size_t nmemb, void * AuthenticationResult OAuth2::Authenticate() { CurlEasyWrapper curl_handle; + std::string token_suffix = "/token"; + 
std::string post_data = "grant_type=client_credentials"; + auto use_cognito = false; + if (server_.length() >= token_suffix.length()) { + use_cognito = (0 == server_.compare(server_.length() - token_suffix.length(), token_suffix.length(), token_suffix)); + } + curlEasySetoptWrapper(curl_handle.get(), CURLOPT_VERBOSE, get_curlopt_verbose()); - curlEasySetoptWrapper(curl_handle.get(), CURLOPT_URL, (server_ + "/token").c_str()); - if (ca_certs_ != "") { + // We need this check for backwards-compatibility with previous versions of treehub.json. + // Previous versions have the server URL *without* the token path, so it needs to be hardcoded. + // The new version has the full URL with the `/oauth2/token` path at the end, so nothing needs + // to be appended. Also we can't send the `scope` to Auth+, it confuses it. + // This check can be removed after we finish the migration to Cognito. At that point there's + // no need to attempt to be backwards-compatible anymore, since old credentials will have a + // client_id that will have been removed from user-profile, and a server URL to Auth+, which + // will be in computer heaven. 
+ // check similar implementation in Scala for garage-sign here: + // https://github.com/uptane/ota-tuf/blob/master/cli/src/main/scala/com/advancedtelematic/tuf/cli/http/OAuth2Client.scala + if (use_cognito) { + curlEasySetoptWrapper(curl_handle.get(), CURLOPT_URL, (server_).c_str()); + } else { + curlEasySetoptWrapper(curl_handle.get(), CURLOPT_URL, (server_ + token_suffix).c_str()); + } + if (!ca_certs_.empty()) { curlEasySetoptWrapper(curl_handle.get(), CURLOPT_CAINFO, ca_certs_.c_str()); curlEasySetoptWrapper(curl_handle.get(), CURLOPT_CAPATH, NULL); } @@ -37,7 +56,11 @@ AuthenticationResult OAuth2::Authenticate() { curlEasySetoptWrapper(curl_handle.get(), CURLOPT_USERNAME, client_id_.c_str()); curlEasySetoptWrapper(curl_handle.get(), CURLOPT_PASSWORD, client_secret_.c_str()); curlEasySetoptWrapper(curl_handle.get(), CURLOPT_POST, 1); - curlEasySetoptWrapper(curl_handle.get(), CURLOPT_COPYPOSTFIELDS, "grant_type=client_credentials"); + if (use_cognito) { + curlEasySetoptWrapper(curl_handle.get(), CURLOPT_COPYPOSTFIELDS, (post_data + "&scope=" + scope_).c_str()); + } else { + curlEasySetoptWrapper(curl_handle.get(), CURLOPT_COPYPOSTFIELDS, post_data.c_str()); + } stringstream body; curlEasySetoptWrapper(curl_handle.get(), CURLOPT_WRITEFUNCTION, &curl_handle_write_sstream); @@ -59,7 +82,6 @@ AuthenticationResult OAuth2::Authenticate() { return AuthenticationResult::kFailure; } } else { - // TODO: , be more specfic about the failure cases return AuthenticationResult::kFailure; } } diff --git a/src/sota_tools/oauth2.h b/src/sota_tools/oauth2.h index 45dc40c0f6..0f40cb35b6 100644 --- a/src/sota_tools/oauth2.h +++ b/src/sota_tools/oauth2.h @@ -11,10 +11,11 @@ class OAuth2 { /** * Doesn't perform any authentication */ - OAuth2(std::string server, std::string client_id, std::string client_secret, std::string ca_certs) + OAuth2(std::string server, std::string client_id, std::string client_secret, std::string scope, std::string ca_certs) : server_(std::move(server)), 
client_id_(std::move(client_id)), client_secret_(std::move(client_secret)), + scope_(std::move(scope)), ca_certs_(std::move(ca_certs)) {} /** @@ -28,6 +29,7 @@ class OAuth2 { const std::string server_; const std::string client_id_; const std::string client_secret_; + const std::string scope_; const std::string ca_certs_; std::string token_; }; diff --git a/src/sota_tools/ostree_dir_repo.cc b/src/sota_tools/ostree_dir_repo.cc index b1a4eb0675..6e1fa9454f 100644 --- a/src/sota_tools/ostree_dir_repo.cc +++ b/src/sota_tools/ostree_dir_repo.cc @@ -2,6 +2,7 @@ #include +#include #include #include "logging/logging.h" diff --git a/src/sota_tools/ostree_dir_repo.h b/src/sota_tools/ostree_dir_repo.h index 1e951a5259..b58fd828e1 100644 --- a/src/sota_tools/ostree_dir_repo.h +++ b/src/sota_tools/ostree_dir_repo.h @@ -1,7 +1,7 @@ #ifndef SOTA_CLIENT_TOOLS_OSTREE_DIR_REPO_H_ #define SOTA_CLIENT_TOOLS_OSTREE_DIR_REPO_H_ -#include +#include #include "ostree_ref.h" #include "ostree_repo.h" @@ -9,11 +9,16 @@ class OSTreeDirRepo : public OSTreeRepo { public: explicit OSTreeDirRepo(boost::filesystem::path root_path) : root_(std::move(root_path)) {} + // Non-copyable, non-movable + OSTreeDirRepo(const OSTreeDirRepo&) = delete; + OSTreeDirRepo(OSTreeDirRepo&&) = delete; + OSTreeDirRepo& operator=(const OSTreeDirRepo&) = delete; + OSTreeDirRepo& operator=(OSTreeDirRepo&&) = delete; ~OSTreeDirRepo() override = default; bool LooksValid() const override; OSTreeRef GetRef(const std::string& refname) const override; - const boost::filesystem::path root() const override { return root_; } + boost::filesystem::path root() const override { return root_; } private: bool FetchObject(const boost::filesystem::path& path) const override; diff --git a/src/sota_tools/ostree_dir_repo_test.cc b/src/sota_tools/ostree_dir_repo_test.cc index 59351cf3e5..654427d54e 100644 --- a/src/sota_tools/ostree_dir_repo_test.cc +++ b/src/sota_tools/ostree_dir_repo_test.cc @@ -73,6 +73,27 @@ TEST(dir_repo, 
GetObject_Missing) { EXPECT_THROW(src_repo->GetObject(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_META), OSTreeObjectMissing); } +TEST(dir_repo, GetPathForHash) { + auto p = OSTreeDirRepo::GetPathForHash( + OSTreeHash::Parse("1f3378927c2d062e40a372414c920219e506afeb8ef25f9ff72a27b792cd093a"), + OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT); + boost::filesystem::path golden{"1f/3378927c2d062e40a372414c920219e506afeb8ef25f9ff72a27b792cd093a.commit"}; + EXPECT_EQ(p, golden); +} + +TEST(dir_repo, GetPathForHashExtensions) { + auto hash = OSTreeHash::Parse("1f3378927c2d062e40a372414c920219e506afeb8ef25f9ff72a27b792cd093a"); + auto p = OSTreeDirRepo::GetPathForHash(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_FILE); + EXPECT_EQ(p.extension(), ".filez"); + p = OSTreeDirRepo::GetPathForHash(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_TREE); + EXPECT_EQ(p.extension(), ".dirtree"); + p = OSTreeDirRepo::GetPathForHash(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_META); + EXPECT_EQ(p.extension(), ".dirmeta"); + p = OSTreeDirRepo::GetPathForHash(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT); + EXPECT_EQ(p.extension(), ".commit"); + EXPECT_ANY_THROW(OSTreeDirRepo::GetPathForHash(hash, OSTREE_OBJECT_TYPE_UNKNOWN)); +} + #ifndef __NO_MAIN__ int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/src/sota_tools/ostree_hash.cc b/src/sota_tools/ostree_hash.cc index aff85adab0..ed6b945b25 100644 --- a/src/sota_tools/ostree_hash.cc +++ b/src/sota_tools/ostree_hash.cc @@ -5,7 +5,7 @@ #include OSTreeHash OSTreeHash::Parse(const std::string& hash) { - uint8_t sha256[32]; + std::array sha256{}; std::string trimmed_hash = hash.substr(0, hash.find_last_not_of(" \t\n\r\f\v") + 1); std::istringstream refstr(trimmed_hash); @@ -15,36 +15,42 @@ OSTreeHash OSTreeHash::Parse(const std::string& hash) { throw OSTreeCommitParseError("OSTree Hash has invalid length"); } // sha256 is always 256 bits == 32 bytes long - for (int i = 0; i < 32; i++) { - char 
byte_string[3]; + for (size_t i = 0; i < sha256.size(); i++) { + std::array byte_string{}; byte_string[2] = 0; uint64_t byte_holder; - refstr.read(byte_string, 2); + refstr.read(byte_string.data(), 2); char* next_char; - byte_holder = strtoul(byte_string, &next_char, 16); + byte_holder = strtoul(byte_string.data(), &next_char, 16); if (next_char != &byte_string[2]) { throw OSTreeCommitParseError("Invalid character in OSTree commit hash"); } - sha256[i] = byte_holder & 0xFF; + sha256[i] = byte_holder & 0xFF; // NOLINT(cppcoreguidelines-pro-bounds-constant-array-index) } return OSTreeHash(sha256); } -OSTreeHash::OSTreeHash(const uint8_t hash[32]) { std::memcpy(hash_, hash, 32); } +// NOLINTNEXTLINE(modernize-avoid-c-arrays, cppcoreguidelines-avoid-c-arrays, hicpp-avoid-c-arrays) +OSTreeHash::OSTreeHash(const uint8_t hash[32]) { std::memcpy(hash_.data(), hash, hash_.size()); } + +OSTreeHash::OSTreeHash(const std::array& hash) { std::memcpy(hash_.data(), hash.data(), hash.size()); } std::string OSTreeHash::string() const { std::stringstream str_str; str_str.fill('0'); // sha256 hash is always 256 bits = 32 bytes long - for (int i = 0; i < 32; i++) { + for (size_t i = 0; i < hash_.size(); i++) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-constant-array-index) str_str << std::setw(2) << std::hex << static_cast(hash_[i]); } return str_str.str(); } -bool OSTreeHash::operator<(const OSTreeHash& other) const { return memcmp(hash_, other.hash_, 32) < 0; } +bool OSTreeHash::operator<(const OSTreeHash& other) const { + return memcmp(hash_.data(), other.hash_.data(), hash_.size()) < 0; +} std::ostream& operator<<(std::ostream& os, const OSTreeHash& obj) { os << obj.string(); diff --git a/src/sota_tools/ostree_hash.h b/src/sota_tools/ostree_hash.h index 0a42ad82cb..708dae1720 100644 --- a/src/sota_tools/ostree_hash.h +++ b/src/sota_tools/ostree_hash.h @@ -1,6 +1,7 @@ #ifndef SOTA_CLIENT_TOOLS_OSTREE_HASH_H_ #define SOTA_CLIENT_TOOLS_OSTREE_HASH_H_ +#include #include 
#include #include @@ -14,7 +15,9 @@ class OSTreeHash { */ static OSTreeHash Parse(const std::string& hash); - explicit OSTreeHash(const uint8_t /*hash*/[32]); + // NOLINTNEXTLINE(modernize-avoid-c-arrays, cppcoreguidelines-avoid-c-arrays, hicpp-avoid-c-arrays) + explicit OSTreeHash(const uint8_t hash[32]); + explicit OSTreeHash(const std::array& hash); std::string string() const; @@ -22,7 +25,7 @@ class OSTreeHash { friend std::ostream& operator<<(std::ostream& os, const OSTreeHash& obj); private: - uint8_t hash_[32]{}; + std::array hash_{}; }; class OSTreeCommitParseError : std::exception { diff --git a/src/sota_tools/ostree_http_repo.cc b/src/sota_tools/ostree_http_repo.cc index a7644bef70..c2c276aa2a 100644 --- a/src/sota_tools/ostree_http_repo.cc +++ b/src/sota_tools/ostree_http_repo.cc @@ -1,14 +1,11 @@ #include "ostree_http_repo.h" #include - #include +#include #include -#include "logging/logging.h" -#include "utilities/utils.h" - namespace pt = boost::property_tree; bool OSTreeHttpRepo::LooksValid() const { @@ -38,10 +35,7 @@ OSTreeRef OSTreeHttpRepo::GetRef(const std::string &refname) const { return OSTr bool OSTreeHttpRepo::FetchObject(const boost::filesystem::path &path) const { CURLcode err = CURLE_OK; - CurlEasyWrapper easy_handle; - curlEasySetoptWrapper(easy_handle.get(), CURLOPT_VERBOSE, get_curlopt_verbose()); - server_->InjectIntoCurl(path.string(), easy_handle.get()); - curlEasySetoptWrapper(easy_handle.get(), CURLOPT_WRITEFUNCTION, &OSTreeHttpRepo::curl_handle_write); + server_->InjectIntoCurl(path.string(), easy_handle_.get()); boost::filesystem::create_directories((root_ / path).parent_path()); std::string filename = (root_ / path).string(); int fp = open(filename.c_str(), O_WRONLY | O_CREAT, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH); @@ -49,9 +43,8 @@ bool OSTreeHttpRepo::FetchObject(const boost::filesystem::path &path) const { LOG_ERROR << "Failed to open file: " << filename; return false; } - curlEasySetoptWrapper(easy_handle.get(), 
CURLOPT_WRITEDATA, &fp); - curlEasySetoptWrapper(easy_handle.get(), CURLOPT_FAILONERROR, true); - err = curl_easy_perform(easy_handle.get()); + curlEasySetoptWrapper(easy_handle_.get(), CURLOPT_WRITEDATA, &fp); + err = curl_easy_perform(easy_handle_.get()); close(fp); if (err == CURLE_HTTP_RETURNED_ERROR) { @@ -62,7 +55,7 @@ bool OSTreeHttpRepo::FetchObject(const boost::filesystem::path &path) const { } else if (err != CURLE_OK) { // other unexpected error char *last_url = nullptr; - curl_easy_getinfo(easy_handle.get(), CURLINFO_EFFECTIVE_URL, &last_url); + curl_easy_getinfo(easy_handle_.get(), CURLINFO_EFFECTIVE_URL, &last_url); LOG_ERROR << "Failed to get object:" << curl_easy_strerror(err); if (last_url != nullptr) { LOG_ERROR << "Url: " << last_url; diff --git a/src/sota_tools/ostree_http_repo.h b/src/sota_tools/ostree_http_repo.h index e001b4e444..1d0a443205 100644 --- a/src/sota_tools/ostree_http_repo.h +++ b/src/sota_tools/ostree_http_repo.h @@ -1,27 +1,38 @@ #ifndef SOTA_CLIENT_TOOLS_OSTREE_HTTP_REPO_H_ #define SOTA_CLIENT_TOOLS_OSTREE_HTTP_REPO_H_ -#include +#include +#include "logging/logging.h" #include "ostree_ref.h" #include "ostree_repo.h" #include "treehub_server.h" +#include "utilities/utils.h" class OSTreeHttpRepo : public OSTreeRepo { public: - explicit OSTreeHttpRepo(TreehubServer* server) : server_(server) {} - ~OSTreeHttpRepo() override = default; + explicit OSTreeHttpRepo(TreehubServer* server, boost::filesystem::path root_in = "") + : server_(server), root_(std::move(root_in)) { + if (root_.empty()) { + root_ = root_tmp_.Path(); + } + curlEasySetoptWrapper(easy_handle_.get(), CURLOPT_VERBOSE, get_curlopt_verbose()); + curlEasySetoptWrapper(easy_handle_.get(), CURLOPT_WRITEFUNCTION, &OSTreeHttpRepo::curl_handle_write); + curlEasySetoptWrapper(easy_handle_.get(), CURLOPT_FAILONERROR, true); + } bool LooksValid() const override; OSTreeRef GetRef(const std::string& refname) const override; - const boost::filesystem::path root() const override { 
return root_.Path(); } + boost::filesystem::path root() const override { return root_; } private: bool FetchObject(const boost::filesystem::path& path) const override; static size_t curl_handle_write(void* buffer, size_t size, size_t nmemb, void* userp); TreehubServer* server_; - const TemporaryDirectory root_; + boost::filesystem::path root_; + const TemporaryDirectory root_tmp_; + mutable CurlEasyWrapper easy_handle_; }; // vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/sota_tools/ostree_http_repo_test.cc b/src/sota_tools/ostree_http_repo_test.cc index 3a3c896c14..065d0fba79 100644 --- a/src/sota_tools/ostree_http_repo_test.cc +++ b/src/sota_tools/ostree_http_repo_test.cc @@ -95,7 +95,7 @@ TEST(http_repo, bad_connection) { auto hash = OSTreeHash::Parse("b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563"); TreehubServer push_server; EXPECT_EQ(authenticate(cert_path.string(), ServerCredentials(filepath), push_server), EXIT_SUCCESS); - UploadToTreehub(src_repo, push_server, hash, RunMode::kDefault, 1); + UploadToTreehub(src_repo, push_server, hash, RunMode::kDefault, 1, false); std::string diff("diff -r "); std::string src_path((src_dir.Path() / "objects").string() + " "); diff --git a/src/sota_tools/ostree_object.cc b/src/sota_tools/ostree_object.cc index bd6f9451eb..1b727cb07f 100644 --- a/src/sota_tools/ostree_object.cc +++ b/src/sota_tools/ostree_object.cc @@ -1,13 +1,13 @@ #include "ostree_object.h" -#include - +#include +#include #include +#include +#include #include #include -#include - #include "logging/logging.h" #include "ostree_repo.h" #include "request_pool.h" @@ -15,16 +15,17 @@ using std::string; -OSTreeObject::OSTreeObject(const OSTreeRepo &repo, const std::string &object_name) - : file_path_(repo.root() / "/objects/" / object_name), - object_name_(object_name), +OSTreeObject::OSTreeObject(const OSTreeRepo &repo, OSTreeHash hash, OstreeObjectType object_type) + : hash_(hash), + type_(object_type), repo_(repo), refcount_(0), 
is_on_server_(PresenceOnServer::kObjectStateUnknown), curl_handle_(nullptr), fd_(nullptr) { - if (!boost::filesystem::is_regular_file(file_path_)) { - throw std::runtime_error(file_path_.native() + " is not a valid OSTree repo."); + auto file_path = PathOnDisk(); + if (!boost::filesystem::is_regular_file(file_path)) { + throw std::runtime_error(file_path.native() + " is not a valid OSTree object."); } } @@ -73,27 +74,25 @@ void OSTreeObject::AppendChild(const OSTreeObject::ptr &child) { // Can throw OSTreeObjectMissing if the repo is corrupt void OSTreeObject::PopulateChildren() { - const boost::filesystem::path ext = file_path_.extension(); const GVariantType *content_type; bool is_commit; - // variant types are borrowed from libostree/ostree-core.h, - // but we don't want to create dependency on it - if (ext.compare(".commit") == 0) { - content_type = G_VARIANT_TYPE("(a{sv}aya(say)sstayay)"); + if (type_ == OSTREE_OBJECT_TYPE_COMMIT) { + content_type = OSTREE_COMMIT_GVARIANT_FORMAT; is_commit = true; - } else if (ext.compare(".dirtree") == 0) { - content_type = G_VARIANT_TYPE("(a(say)a(sayay))"); + } else if (type_ == OSTREE_OBJECT_TYPE_DIR_TREE) { + content_type = OSTREE_TREE_GVARIANT_FORMAT; is_commit = false; } else { return; } GError *gerror = nullptr; - GMappedFile *mfile = g_mapped_file_new(file_path_.c_str(), FALSE, &gerror); + auto file_path = PathOnDisk(); + GMappedFile *mfile = g_mapped_file_new(file_path.c_str(), FALSE, &gerror); if (mfile == nullptr) { - throw std::runtime_error("Failed to map metadata file " + file_path_.native()); + throw std::runtime_error("Failed to map metadata file " + file_path.native()); } GVariant *contents = @@ -179,7 +178,20 @@ void OSTreeObject::QueryChildren(RequestPool &pool) { } } -string OSTreeObject::Url() const { return "objects/" + object_name_; } +string OSTreeObject::Url() const { + boost::filesystem::path p("objects"); + p /= OSTreeRepo::GetPathForHash(hash_, type_); + return p.string(); +} + 
+boost::filesystem::path OSTreeObject::PathOnDisk() const { + auto path = repo_.root(); + path /= "objects"; + path /= OSTreeRepo::GetPathForHash(hash_, type_); + return path; +} + +uintmax_t OSTreeObject::GetSize() const { return boost::filesystem::file_size(PathOnDisk()); } void OSTreeObject::MakeTestRequest(const TreehubServer &push_target, CURLM *curl_multi_handle) { assert(!curl_handle_); @@ -209,9 +221,9 @@ void OSTreeObject::MakeTestRequest(const TreehubServer &push_target, CURLM *curl void OSTreeObject::Upload(TreehubServer &push_target, CURLM *curl_multi_handle, const RunMode mode) { if (mode == RunMode::kDefault || mode == RunMode::kPushTree) { - LOG_INFO << "Uploading " << object_name_; + LOG_INFO << "Uploading " << *this; } else { - LOG_INFO << "Would upload " << object_name_; + LOG_INFO << "Would upload " << *this; is_on_server_ = PresenceOnServer::kObjectPresent; return; } @@ -231,11 +243,12 @@ void OSTreeObject::Upload(TreehubServer &push_target, CURLM *curl_multi_handle, http_response_.str(""); // Empty the response buffer struct stat file_info {}; - fd_ = fopen(file_path_.c_str(), "rb"); + auto file_path = PathOnDisk(); + fd_ = fopen(file_path.c_str(), "rb"); if (fd_ == nullptr) { throw std::runtime_error("could not open file to be uploaded"); } else { - if (stat(file_path_.c_str(), &file_info) < 0) { + if (stat(file_path.c_str(), &file_info) < 0) { throw std::runtime_error("Could not get file information"); } } @@ -256,7 +269,7 @@ void OSTreeObject::Upload(TreehubServer &push_target, CURLM *curl_multi_handle, void OSTreeObject::CheckChildren(RequestPool &pool, const long rescode) { // NOLINT(google-runtime-int) try { PopulateChildren(); - LOG_TRACE << "Children of " << object_name_ << ": " << children_.size(); + LOG_TRACE << "Children of " << *this << ": " << children_.size(); if (children_ready()) { if (rescode != 200) { pool.AddUpload(this); @@ -299,10 +312,11 @@ void OSTreeObject::CurlDone(CURLM *curl_multi_handle, RequestPool &pool) { if 
(current_operation_ == CurrentOp::kOstreeObjectPresenceCheck) { // Sanity-check the handle's URL to make sure it contains the expected // object hash. - if (url == nullptr || strstr(url, object_name_.c_str()) == nullptr) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (url == nullptr || strstr(url, OSTreeRepo::GetPathForHash(hash_, type_).c_str()) == nullptr) { PresenceError(pool, rescode); } else if (rescode == 200) { - LOG_INFO << "Already present: " << object_name_; + LOG_INFO << "Already present: " << *this; is_on_server_ = PresenceOnServer::kObjectPresent; last_operation_result_ = ServerResponse::kOk; if (pool.run_mode() == RunMode::kWalkTree || pool.run_mode() == RunMode::kPushTree) { @@ -321,7 +335,8 @@ void OSTreeObject::CurlDone(CURLM *curl_multi_handle, RequestPool &pool) { } else if (current_operation_ == CurrentOp::kOstreeObjectUploading) { // Sanity-check the handle's URL to make sure it contains the expected // object hash. - if (url == nullptr || strstr(url, object_name_.c_str()) == nullptr) { + // NOLINTNEXTLINE(bugprone-branch-clone) + if (url == nullptr || strstr(url, Url().c_str()) == nullptr) { UploadError(pool, rescode); } else if (rescode == 204) { LOG_TRACE << "OSTree upload successful"; @@ -360,6 +375,41 @@ OSTreeObject::ptr ostree_object_from_curl(CURL *curlhandle) { return boost::intrusive_ptr(h); } +bool OSTreeObject::Fsck() const { + GFile *repo_path_file = g_file_new_for_path(repo_.root().c_str()); // Never fails + OstreeRepo *repo = ostree_repo_new(repo_path_file); + GError *err = nullptr; + auto ok = ostree_repo_open(repo, nullptr, &err); + + if (ok == FALSE) { + LOG_ERROR << "ostree_repo_open failed"; + if (err != nullptr) { + LOG_ERROR << "err:" << err->message; + g_error_free(err); + } + g_object_unref(repo_path_file); + g_object_unref(repo); + return false; + } + + ok = ostree_repo_fsck_object(repo, type_, hash_.string().c_str(), nullptr, &err); + + g_object_unref(repo_path_file); + g_object_unref(repo); + + if (ok == FALSE) { + 
LOG_WARNING << "Object " << *this << " is corrupt"; + if (err != nullptr) { + LOG_WARNING << "err:" << err->message; + g_error_free(err); + } + return false; + } + + LOG_DEBUG << "Object is OK"; + return true; +} + void intrusive_ptr_add_ref(OSTreeObject *h) { h->refcount_++; } void intrusive_ptr_release(OSTreeObject *h) { @@ -369,7 +419,7 @@ void intrusive_ptr_release(OSTreeObject *h) { } std::ostream &operator<<(std::ostream &stream, const OSTreeObject &o) { - stream << o.object_name_; + stream << OSTreeRepo::GetPathForHash(o.hash_, o.type_).native(); return stream; } diff --git a/src/sota_tools/ostree_object.h b/src/sota_tools/ostree_object.h index 5d038a8984..cc33716aff 100644 --- a/src/sota_tools/ostree_object.h +++ b/src/sota_tools/ostree_object.h @@ -6,11 +6,12 @@ #include #include -#include +#include #include #include "gtest/gtest_prod.h" #include "garage_common.h" +#include "ostree_hash.h" #include "treehub_server.h" class OSTreeRepo; @@ -21,18 +22,27 @@ enum class PresenceOnServer { kObjectStateUnknown, kObjectPresent, kObjectMissin enum class CurrentOp { kOstreeObjectUploading, kOstreeObjectPresenceCheck }; /** - * Broad categories for server response codes. - * There is no category for a permanent failure at the moment: we are unable to - * detect a failure that is definitely permanent. + * Broad categories for the result of attempting an upload. + * At the moment all errors from the server are considered temporary, because + * we are unable to detect a server failure that is definitely permanent. 
*/ -enum class ServerResponse { kNoResponse, kOk, kTemporaryFailure }; +enum class ServerResponse { + /** The upload hasn't been attempted yet */ + kNoResponse, + /** The upload was successful */ + kOk, + /** There was an error uploading the object, but a retry may work */ + kTemporaryFailure, +}; class OSTreeObject { public: using ptr = boost::intrusive_ptr; - OSTreeObject(const OSTreeRepo& repo, const std::string& object_name); + OSTreeObject(const OSTreeRepo& repo, OSTreeHash hash, OstreeObjectType object_type); OSTreeObject(const OSTreeObject&) = delete; + OSTreeObject(OSTreeObject&&) = delete; OSTreeObject operator=(const OSTreeObject&) = delete; + OSTreeObject operator=(OSTreeObject&&) = delete; ~OSTreeObject(); @@ -50,16 +60,20 @@ class OSTreeObject { /* Process a completed curl transaction (presence check or upload). */ void CurlDone(CURLM* curl_multi_handle, RequestPool& pool); + uintmax_t GetSize() const; + PresenceOnServer is_on_server() const { return is_on_server_; } CurrentOp operation() const { return current_operation_; } - bool children_ready() { return children_.empty(); } + bool children_ready() const { return children_.empty(); } void LaunchNotify() { is_on_server_ = PresenceOnServer::kObjectInProgress; } std::chrono::steady_clock::time_point RequestStartTime() const { return request_start_time_; } ServerResponse LastOperationResult() const { return last_operation_result_; } + bool Fsck() const; + private: using childiter = std::list::iterator; - typedef std::pair parentref; + using parentref = std::pair; /* Add parent to this object. 
*/ void AddParent(OSTreeObject* parent, std::list::iterator parent_it); @@ -92,6 +106,9 @@ class OSTreeObject { static size_t curl_handle_write(void* buffer, size_t size, size_t nmemb, void* userp); + /** Full path on disk to this object */ + boost::filesystem::path PathOnDisk() const; + FRIEND_TEST(OstreeObject, Request); FRIEND_TEST(OstreeObject, UploadDryRun); FRIEND_TEST(OstreeObject, UploadFail); @@ -100,8 +117,10 @@ class OSTreeObject { friend void intrusive_ptr_release(OSTreeObject* /*h*/); friend std::ostream& operator<<(std::ostream& stream, const OSTreeObject& o); - const boost::filesystem::path file_path_; // Full path to the object - const std::string object_name_; // OSTree name of the object + // SHA256 Hash of the object + const OSTreeHash hash_; + // Type of the object + const OstreeObjectType type_; const OSTreeRepo& repo_; int refcount_; // refcounts and intrusive_ptr are used to simplify // interaction with curl @@ -116,7 +135,6 @@ class OSTreeObject { std::chrono::steady_clock::time_point request_start_time_; ServerResponse last_operation_result_{ServerResponse::kNoResponse}; - OstreeObjectType type_{OstreeObjectType::OSTREE_OBJECT_TYPE_UNKNOWN}; }; OSTreeObject::ptr ostree_object_from_curl(CURL* curlhandle); diff --git a/src/sota_tools/ostree_object_test.cc b/src/sota_tools/ostree_object_test.cc index eee0591d96..15675fa312 100644 --- a/src/sota_tools/ostree_object_test.cc +++ b/src/sota_tools/ostree_object_test.cc @@ -16,16 +16,49 @@ std::string repo_path; /* Verify that constructor does not accept a nonexistent repo. */ TEST(OstreeObject, ConstructorBad) { + OSTreeDirRepo good_repo(repo_path); + OSTreeHash hash = good_repo.GetRef("master").GetHash(); OSTreeDirRepo bad_repo("nonexistentrepo"); - EXPECT_THROW(OSTreeObject(bad_repo, "bad"), std::runtime_error); + EXPECT_THROW(OSTreeObject(bad_repo, hash, OSTREE_OBJECT_TYPE_COMMIT), std::runtime_error); } /* Verify that constructor accepts a valid repo and commit hash. 
*/ TEST(OstreeObject, ConstructorGood) { OSTreeDirRepo good_repo(repo_path); OSTreeHash hash = good_repo.GetRef("master").GetHash(); - boost::filesystem::path objpath = hash.string().insert(2, 1, '/'); - OSTreeObject(good_repo, objpath.string() + ".commit"); + OSTreeObject(good_repo, hash, OSTREE_OBJECT_TYPE_COMMIT); +} + +/** Check the << formatting hasn't changed */ +TEST(OstreeObject, OStreamFormat) { + OSTreeDirRepo good_repo(repo_path); + auto hash = good_repo.GetRef("master").GetHash(); + auto obj = good_repo.GetObject(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT); + std::stringstream ss; + ss << obj; + EXPECT_EQ(ss.str(), "b9/ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563.commit"); +} + +/** Check the OstreeObject::GetSize() is sane */ +TEST(OstreeObject, GetSize) { + OSTreeDirRepo good_repo(repo_path); + auto hash = good_repo.GetRef("master").GetHash(); + auto obj = good_repo.GetObject(hash, OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT); + EXPECT_EQ(obj->GetSize(), 118); +} + +TEST(OstreeObject, Fsck) { + OSTreeDirRepo repo("tests/sota_tools/corrupt-repo"); + auto good_object = + repo.GetObject(OSTreeHash::Parse("2ee758031340b51db1c0229bddd8f64bca4b131728d2bfb20c0c8671b1259a38"), + OstreeObjectType::OSTREE_OBJECT_TYPE_FILE); + + EXPECT_TRUE(good_object->Fsck()); + auto corrupt_object = + repo.GetObject(OSTreeHash::Parse("4145b1a9bade30efb28ff921f7a555ff82ba7d3b7b83b968084436167912fa83"), + OstreeObjectType::OSTREE_OBJECT_TYPE_FILE); + + EXPECT_FALSE(corrupt_object->Fsck()); } // This is a class solely for the purpose of being a FRIEND_TEST to diff --git a/src/sota_tools/ostree_ref.cc b/src/sota_tools/ostree_ref.cc index ed989823d7..f8767d2fab 100644 --- a/src/sota_tools/ostree_ref.cc +++ b/src/sota_tools/ostree_ref.cc @@ -1,8 +1,8 @@ #include "ostree_ref.h" #include +#include #include -#include #include #include #include diff --git a/src/sota_tools/ostree_ref.h b/src/sota_tools/ostree_ref.h index cb176f9729..47f9c00685 100644 --- 
a/src/sota_tools/ostree_ref.h +++ b/src/sota_tools/ostree_ref.h @@ -4,7 +4,6 @@ #include #include -#include #include "ostree_hash.h" #include "ostree_repo.h" @@ -14,12 +13,10 @@ class OSTreeRef { public: OSTreeRef(const OSTreeRepo& repo, const std::string& ref_name); OSTreeRef(const TreehubServer& serve_repo, std::string ref_name); - OSTreeRef(OSTreeRef&& rhs) = default; void PushRef(const TreehubServer& push_target, CURL* curl_handle) const; OSTreeHash GetHash() const; - std::string GetName() const { return ref_name_; }; bool IsValid() const; @@ -34,5 +31,5 @@ class OSTreeRef { static size_t curl_handle_write(void* buffer, size_t size, size_t nmemb, void* userp); }; -// vim: set tabstop=2 shiftwidth=2 expandtab: #endif // SOTA_CLIENT_TOOLS_OSTREE_REF_H_ +// vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/sota_tools/ostree_repo.cc b/src/sota_tools/ostree_repo.cc index 29a36712d4..baea0a7097 100644 --- a/src/sota_tools/ostree_repo.cc +++ b/src/sota_tools/ostree_repo.cc @@ -2,48 +2,87 @@ #include "logging/logging.h" +// NOLINTNEXTLINE(modernize-avoid-c-arrays, cppcoreguidelines-avoid-c-arrays, hicpp-avoid-c-arrays) OSTreeObject::ptr OSTreeRepo::GetObject(const uint8_t sha256[32], const OstreeObjectType type) const { return GetObject(OSTreeHash(sha256), type); } OSTreeObject::ptr OSTreeRepo::GetObject(const OSTreeHash hash, const OstreeObjectType type) const { + // If we've already seen this object, return another pointer to it otable::const_iterator obj_it = ObjectTable.find(hash); if (obj_it != ObjectTable.cend()) { return obj_it->second; } - const std::map exts{{OstreeObjectType::OSTREE_OBJECT_TYPE_FILE, ".filez"}, - {OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_TREE, ".dirtree"}, - {OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_META, ".dirmeta"}, - {OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT, ".commit"}}; - const std::string objpath = hash.string().insert(2, 1, '/'); OSTreeObject::ptr object; for (int i = 0; i < 3; ++i) { if (i > 0) { LOG_WARNING << "OSTree 
hash " << hash << " not found. Retrying (attempt " << i << " of 3)"; } - if (type != OstreeObjectType::OSTREE_OBJECT_TYPE_UNKNOWN) { - if (CheckForObject(hash, objpath + exts.at(type), object)) { + if (type != OSTREE_OBJECT_TYPE_UNKNOWN) { + if (CheckForObject(hash, type, &object)) { return object; } } else { - for (auto it = exts.cbegin(); it != exts.cend(); ++it) { - if (CheckForObject(hash, objpath + it->second, object)) { - return object; - } + // If we don't know the type for any reason, try the object types we know + // about. + if (CheckForObject(hash, OSTREE_OBJECT_TYPE_FILE, &object)) { + return object; + } + if (CheckForObject(hash, OSTREE_OBJECT_TYPE_DIR_META, &object)) { + return object; + } + if (CheckForObject(hash, OSTREE_OBJECT_TYPE_DIR_TREE, &object)) { + return object; + } + if (CheckForObject(hash, OSTREE_OBJECT_TYPE_COMMIT, &object)) { + return object; } } } + // We don't already have the object, and can't fetch it after a few retries => fail throw OSTreeObjectMissing(hash); } -bool OSTreeRepo::CheckForObject(const OSTreeHash &hash, const std::string &path, OSTreeObject::ptr &object) const { - if (FetchObject(std::string("objects/") + path)) { - object = OSTreeObject::ptr(new OSTreeObject(*this, path)); +bool OSTreeRepo::CheckForObject(const OSTreeHash &hash, OstreeObjectType type, OSTreeObject::ptr *object_out) const { + boost::filesystem::path path("objects"); + path /= GetPathForHash(hash, type); + if (FetchObject(path)) { + auto object = OSTreeObject::ptr(new OSTreeObject(*this, hash, type)); ObjectTable[hash] = object; + *object_out = object; LOG_DEBUG << "Fetched OSTree object " << path; return true; } return false; } + +/** + * Get the relative path on disk (or TreeHub) for an object. 
+ * When an object has been successfully fetched, it will be on disk at + * root() / GetPathForHash() + * @param hash + * @param type + * @return + */ +boost::filesystem::path OSTreeRepo::GetPathForHash(OSTreeHash hash, OstreeObjectType type) { + std::string objpath = hash.string().insert(2, 1, '/'); + switch (type) { + case OstreeObjectType::OSTREE_OBJECT_TYPE_FILE: + objpath += ".filez"; + break; + case OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_TREE: + objpath += ".dirtree"; + break; + case OstreeObjectType::OSTREE_OBJECT_TYPE_DIR_META: + objpath += ".dirmeta"; + break; + case OstreeObjectType::OSTREE_OBJECT_TYPE_COMMIT: + objpath += ".commit"; + break; + default: + throw OSTreeUnsupportedObjectType(type); + } + return boost::filesystem::path(objpath); +} \ No newline at end of file diff --git a/src/sota_tools/ostree_repo.h b/src/sota_tools/ostree_repo.h index b6c8907d45..972837e266 100644 --- a/src/sota_tools/ostree_repo.h +++ b/src/sota_tools/ostree_repo.h @@ -4,7 +4,7 @@ #include #include -#include +#include #include "garage_common.h" #include "ostree_hash.h" @@ -19,22 +19,40 @@ class OSTreeRef; class OSTreeRepo { public: using ptr = std::shared_ptr; + OSTreeRepo() = default; + // Non-copyable, Non-movable + OSTreeRepo(const OSTreeRepo&) = delete; + OSTreeRepo(OSTreeRepo&&) = delete; OSTreeRepo& operator=(const OSTreeRepo&) = delete; + OSTreeRepo& operator=(OSTreeRepo&&) = delete; virtual ~OSTreeRepo() = default; virtual bool LooksValid() const = 0; - virtual const boost::filesystem::path root() const = 0; + virtual boost::filesystem::path root() const = 0; virtual OSTreeRef GetRef(const std::string& refname) const = 0; OSTreeObject::ptr GetObject(OSTreeHash hash, OstreeObjectType type) const; + // NOLINTNEXTLINE(modernize-avoid-c-arrays, cppcoreguidelines-avoid-c-arrays, hicpp-avoid-c-arrays) OSTreeObject::ptr GetObject(const uint8_t sha256[32], OstreeObjectType type) const; + static boost::filesystem::path GetPathForHash(OSTreeHash hash, OstreeObjectType 
type); + protected: + /** + * Look for an object with a given path, downloading it if necessary and + * possible. + * For OSTreeDirRepo, this is a simple check to see if the file exists on + * disk. OSTreeHttpRepo will attempt to fetch the file from the remote + * server to a temporary directory (if it hasn't already been fetched). + * In either case, the following post-conditions hold + * FetchObject() returns false => The object is not available at all + * FetchObject() returns true => The object is on the local file system. + * */ virtual bool FetchObject(const boost::filesystem::path& path) const = 0; - bool CheckForObject(const OSTreeHash& hash, const std::string& path, OSTreeObject::ptr& object) const; + bool CheckForObject(const OSTreeHash& hash, OstreeObjectType type, OSTreeObject::ptr* object) const; - typedef std::map otable; + using otable = std::map; mutable otable ObjectTable; // Makes sure that the same commit object is not added twice }; @@ -53,5 +71,18 @@ class OSTreeObjectMissing : std::exception { private: OSTreeHash missing_object_; }; + +class OSTreeUnsupportedObjectType : std::exception { + public: + explicit OSTreeUnsupportedObjectType(OstreeObjectType bad_type) : bad_type_(bad_type) {} + + const char* what() const noexcept override { return "Unknown OstreeObjectType"; } + + OstreeObjectType bad_type() const { return bad_type_; } + + private: + OstreeObjectType bad_type_; +}; + // vim: set tabstop=2 shiftwidth=2 expandtab: #endif // SOTA_CLIENT_TOOLS_OSTREE_REPO_H_ diff --git a/src/sota_tools/rate_controller.cc b/src/sota_tools/rate_controller.cc index bd3a189a73..569b93a5f3 100644 --- a/src/sota_tools/rate_controller.cc +++ b/src/sota_tools/rate_controller.cc @@ -48,6 +48,8 @@ bool RateController::ServerHasFailed() const { return sleep_time_ > kMaxSleepTime; } +// These assert()'s are compiled out in Release builds, which triggers a clang-tidy warning +// NOLINTNEXTLINE(readability-convert-member-functions-to-static) void 
RateController::CheckInvariants() const { assert((sleep_time_ == clock::duration(0)) || (max_concurrency_ == 1)); assert(0 < max_concurrency_); diff --git a/src/sota_tools/rate_controller.h b/src/sota_tools/rate_controller.h index 7b88d42339..77ec503415 100644 --- a/src/sota_tools/rate_controller.h +++ b/src/sota_tools/rate_controller.h @@ -17,8 +17,11 @@ class RateController { public: using clock = std::chrono::steady_clock; explicit RateController(int concurrency_cap = 30); + ~RateController() = default; RateController(const RateController&) = delete; + RateController(RateController&&) = delete; RateController operator=(const RateController&) = delete; + RateController operator=(RateController&&) = delete; void RequestCompleted(clock::time_point start_time, clock::time_point end_time, bool succeeded); diff --git a/src/sota_tools/request_pool.cc b/src/sota_tools/request_pool.cc index 40262e74e5..c8191bbea1 100644 --- a/src/sota_tools/request_pool.cc +++ b/src/sota_tools/request_pool.cc @@ -7,8 +7,13 @@ #include "logging/logging.h" -RequestPool::RequestPool(TreehubServer& server, const int max_curl_requests, const RunMode mode) - : rate_controller_(max_curl_requests), running_requests_(0), server_(server), mode_(mode), stopped_(false) { +RequestPool::RequestPool(TreehubServer& server, const int max_curl_requests, const RunMode mode, bool fsck_on_upload) + : rate_controller_(max_curl_requests), + running_requests_(0), + server_(server), + mode_(mode), + fsck_on_upload_(fsck_on_upload), + stopped_(false) { curl_global_init(CURL_GLOBAL_DEFAULT); multi_ = curl_multi_init(); curl_multi_setopt(multi_, CURLMOPT_PIPELINING, CURLPIPE_HTTP1 | CURLPIPE_MULTIPLEX); @@ -53,20 +58,32 @@ void RequestPool::LoopLaunch() { // Queries first, uploads second if (query_queue_.empty()) { + // Uploads cur = upload_queue_.front(); upload_queue_.pop_front(); + // Check object's integrity before uploading them, but after we know they + // are not present on the server + if (fsck_on_upload_) 
{ + if (!cur->Fsck()) { + LOG_ERROR << "Local object " << cur << " is corrupt. Aborting upload."; + Abort(); + continue; + } + } cur->Upload(server_, multi_, mode_); - total_requests_made_++; + put_requests_made_++; + total_object_size_ += cur->GetSize(); if (mode_ == RunMode::kDryRun || mode_ == RunMode::kWalkTree) { // Don't send an actual upload message, just skip to the part where we // acknowledge that the object has been uploaded. cur->NotifyParents(*this); } } else { + // Queries cur = query_queue_.front(); query_queue_.pop_front(); cur->MakeTestRequest(server_, multi_); - total_requests_made_++; + head_requests_made_++; } running_requests_++; @@ -79,11 +96,13 @@ void RequestPool::LoopListen() { // https://curl.haxx.se/libcurl/c/curl_multi_fdset.html CURLMcode mc; // Poll for IO - fd_set fdread, fdwrite, fdexcept; + fd_set fdread; + fd_set fdwrite; + fd_set fdexcept; int maxfd = 0; - FD_ZERO(&fdread); - FD_ZERO(&fdwrite); - FD_ZERO(&fdexcept); + FD_ZERO(&fdread); // NOLINT(readability-isolate-declaration) + FD_ZERO(&fdwrite); // NOLINT(readability-isolate-declaration) + FD_ZERO(&fdexcept); // NOLINT(readability-isolate-declaration) long timeoutms = 0; // NOLINT(google-runtime-int) mc = curl_multi_timeout(multi_, &timeoutms); if (mc != CURLM_OK) { @@ -135,12 +154,13 @@ void RequestPool::LoopListen() { do { CURLMsg* msg = curl_multi_info_read(multi_, &msgs_in_queue); if ((msg != nullptr) && msg->msg == CURLMSG_DONE) { - OSTreeObject::ptr h = ostree_object_from_curl(msg->easy_handle); - h->CurlDone(multi_, *this); - const bool server_responded_ok = h->LastOperationResult() == ServerResponse::kOk; - const RateController::clock::time_point start_time = h->RequestStartTime(); - const RateController::clock::time_point end_time = RateController::clock::now(); + OSTreeObject::ptr completed_object = ostree_object_from_curl(msg->easy_handle); + completed_object->CurlDone(multi_, *this); + auto start_time = completed_object->RequestStartTime(); + auto end_time = 
RateController::clock::now(); + bool server_responded_ok = completed_object->LastOperationResult() == ServerResponse::kOk; rate_controller_.RequestCompleted(start_time, end_time, server_responded_ok); + if (rate_controller_.ServerHasFailed()) { Abort(); } else { diff --git a/src/sota_tools/request_pool.h b/src/sota_tools/request_pool.h index e461ef1d2c..5e9c0c2065 100644 --- a/src/sota_tools/request_pool.h +++ b/src/sota_tools/request_pool.h @@ -11,8 +11,14 @@ class RequestPool { public: - RequestPool(TreehubServer& server, int max_curl_requests, RunMode mode); + RequestPool(TreehubServer& server, int max_curl_requests, RunMode mode, bool fsck_on_upload); ~RequestPool(); + // Non-Copyable, Non-Movable + RequestPool(const RequestPool&) = delete; + RequestPool(RequestPool&&) = delete; + RequestPool& operator=(const RequestPool&) = delete; + RequestPool& operator=(RequestPool&&) = delete; + void AddQuery(const OSTreeObject::ptr& request); void AddUpload(const OSTreeObject::ptr& request); void Abort() { @@ -33,7 +39,9 @@ class RequestPool { * The number of HEAD + PUT requests that have been sent to curl. This * includes requests that eventually returned 500 and get retried. 
*/ - int total_requests_made() { return total_requests_made_; } + int put_requests_made() const { return put_requests_made_; } + int head_requests_made() const { return head_requests_made_; } + uintmax_t total_object_size() const { return total_object_size_; } private: void LoopLaunch(); // launches multiple requests from the queues @@ -41,12 +49,15 @@ class RequestPool { RateController rate_controller_; int running_requests_; - int total_requests_made_{0}; + int head_requests_made_{0}; + int put_requests_made_{0}; + uintmax_t total_object_size_{0}; TreehubServer& server_; CURLM* multi_; std::list query_queue_; std::list upload_queue_; RunMode mode_; + bool fsck_on_upload_; bool stopped_; }; // vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/src/sota_tools/server_credentials.cc b/src/sota_tools/server_credentials.cc index 20ef4e2491..e90547408a 100644 --- a/src/sota_tools/server_credentials.cc +++ b/src/sota_tools/server_credentials.cc @@ -102,6 +102,7 @@ ServerCredentials::ServerCredentials(const boost::filesystem::path &credentials_ auth_server_ = ap_pt->get("server", ""); client_id_ = ap_pt->get("client_id", ""); client_secret_ = ap_pt->get("client_secret", ""); + scope_ = ap_pt->get("scope", ""); } else if (optional ba_pt = pt.get_child_optional("basic_auth")) { method_ = AuthMethod::kBasic; auth_user_ = ba_pt->get("user", ""); diff --git a/src/sota_tools/server_credentials.h b/src/sota_tools/server_credentials.h index ea8a434fcd..c38da45489 100644 --- a/src/sota_tools/server_credentials.h +++ b/src/sota_tools/server_credentials.h @@ -3,7 +3,7 @@ #include -#include +#include enum class AuthMethod { kNone = 0, kBasic, kOauth2, kTls }; @@ -35,6 +35,7 @@ class ServerCredentials { std::string GetOSTreeServer() const { return ostree_server_; }; std::string GetClientId() const { return client_id_; }; std::string GetClientSecret() const { return client_secret_; }; + std::string GetScope() const { return scope_; }; /** * Path to the original credentials.zip on 
disk. Needed to hand off to @@ -52,6 +53,7 @@ class ServerCredentials { std::string ostree_server_; std::string client_id_; std::string client_secret_; + std::string scope_; boost::filesystem::path credentials_path_; }; diff --git a/src/sota_tools/treehub_server.cc b/src/sota_tools/treehub_server.cc index 9bb3fc3562..5049c9f6f9 100644 --- a/src/sota_tools/treehub_server.cc +++ b/src/sota_tools/treehub_server.cc @@ -1,7 +1,6 @@ #include "treehub_server.h" -#include - +#include #include #include @@ -51,6 +50,7 @@ void TreehubServer::SetAuthBasic(const std::string& username, const std::string& // Note that this method creates a reference from curl_handle to this. Keep // this TreehubServer object alive until the curl request has been completed void TreehubServer::InjectIntoCurl(const string& url_suffix, CURL* curl_handle, const bool tufrepo) const { + // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) std::string url = (tufrepo ? repo_url_ : root_url_); if (*url.rbegin() != '/' && *url_suffix.begin() != '/') { @@ -91,14 +91,14 @@ void TreehubServer::InjectIntoCurl(const string& url_suffix, CURL* curl_handle, // The trailing slash is optional, and will be appended if required void TreehubServer::root_url(const std::string& _root_url) { root_url_ = _root_url; - if (root_url_.size() > 0 && root_url_[root_url_.size() - 1] != '/') { + if (!root_url_.empty() && root_url_[root_url_.size() - 1] != '/') { root_url_.append("/"); } } void TreehubServer::repo_url(const std::string& _repo_url) { repo_url_ = _repo_url; - if (repo_url_.size() > 0 && repo_url_[repo_url_.size() - 1] != '/') { + if (!repo_url_.empty() && repo_url_[repo_url_.size() - 1] != '/') { repo_url_.append("/"); } } diff --git a/src/uptane_generator/CMakeLists.txt b/src/uptane_generator/CMakeLists.txt index 65bb46c9db..21680cacc3 100644 --- a/src/uptane_generator/CMakeLists.txt +++ b/src/uptane_generator/CMakeLists.txt @@ -2,10 +2,7 @@ set(UPTANE_GENERATOR_SRC repo.cc director_repo.cc image_repo.cc 
uptane_repo.cc) set(UPTANE_GENERATOR_HDR repo.h director_repo.h image_repo.h uptane_repo.h) -set(UPTANE_GENERATOR_LIBS - aktualizr_static_lib - ${AKTUALIZR_EXTERNAL_LIBS} -) +set(UPTANE_GENERATOR_LIBS aktualizr_lib) add_library(uptane_generator_lib ${UPTANE_GENERATOR_SRC}) target_include_directories(uptane_generator_lib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) add_executable(uptane-generator main.cc ${UPTANE_GENERATOR_SRC}) @@ -13,7 +10,7 @@ target_link_libraries(uptane-generator ${UPTANE_GENERATOR_LIBS}) install(TARGETS uptane-generator COMPONENT aktualizr - RUNTIME DESTINATION bin) + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) aktualizr_source_file_checks(${UPTANE_GENERATOR_SRC} ${UPTANE_GENERATOR_HDR}) add_aktualizr_test(NAME uptane_generator @@ -22,6 +19,15 @@ add_aktualizr_test(NAME uptane_generator ARGS $ PROJECT_WORKING_DIRECTORY) +# Check the --help option works. +add_test(NAME uptane-generator-option-help + COMMAND uptane-generator --help) + +# Report version. +add_test(NAME uptane-generator-option-version + COMMAND uptane-generator --version) +set_tests_properties(uptane-generator-option-version PROPERTIES PASS_REGULAR_EXPRESSION "Current uptane-generator version is: ${AKTUALIZR_VERSION}") + aktualizr_source_file_checks(${TEST_SOURCES} main.cc) # vim: set tabstop=4 shiftwidth=4 expandtab: diff --git a/src/uptane_generator/director_repo.cc b/src/uptane_generator/director_repo.cc index f4e0b10ce7..fab4385476 100644 --- a/src/uptane_generator/director_repo.cc +++ b/src/uptane_generator/director_repo.cc @@ -1,7 +1,11 @@ #include "director_repo.h" +#include + +#include "utilities/utils.h" + void DirectorRepo::addTarget(const std::string &target_name, const Json::Value &target, const std::string &hardware_id, - const std::string &ecu_serial, const std::string &url) { + const std::string &ecu_serial, const std::string &url, const std::string &expires) { const boost::filesystem::path current = path_ / DirectorRepo::dir / "targets.json"; const boost::filesystem::path 
staging = path_ / DirectorRepo::dir / "staging/targets.json"; @@ -14,6 +18,9 @@ void DirectorRepo::addTarget(const std::string &target_name, const Json::Value & throw std::runtime_error(std::string("targets.json not found at ") + staging.c_str() + " or " + current.c_str() + "!"); } + if (!expires.empty()) { + director_targets["expires"] = expires; + } director_targets["targets"][target_name] = target; director_targets["targets"][target_name]["custom"].removeMember("hardwareIds"); director_targets["targets"][target_name]["custom"]["ecuIdentifiers"][ecu_serial]["hardwareId"] = hardware_id; @@ -22,6 +29,7 @@ void DirectorRepo::addTarget(const std::string &target_name, const Json::Value & } else { director_targets["targets"][target_name]["custom"].removeMember("uri"); } + director_targets["targets"][target_name]["custom"].removeMember("version"); director_targets["version"] = (Utils::parseJSONFile(current)["signed"]["version"].asUInt()) + 1; Utils::writeFile(staging, Utils::jsonToCanonicalStr(director_targets)); updateRepo(); @@ -32,7 +40,7 @@ void DirectorRepo::revokeTargets(const std::vector &targets_to_remo auto targets_unsigned = Utils::parseJSONFile(targets_path)["signed"]; Json::Value new_targets; - for (Json::ValueIterator it = targets_unsigned["targets"].begin(); it != targets_unsigned["targets"].end(); ++it) { + for (auto it = targets_unsigned["targets"].begin(); it != targets_unsigned["targets"].end(); ++it) { if (std::find(targets_to_remove.begin(), targets_to_remove.end(), it.key().asString()) == targets_to_remove.end()) { new_targets[it.key().asString()] = *it; } @@ -75,7 +83,7 @@ void DirectorRepo::emptyTargets() { targets_unsigned["expires"] = expiration_time_; targets_unsigned["version"] = (targets_current["signed"]["version"].asUInt()) + 1; targets_unsigned["targets"] = Json::objectValue; - if (repo_type_ == Uptane::RepositoryType::Director() && correlation_id_ != "") { + if (repo_type_ == Uptane::RepositoryType::Director() && !correlation_id_.empty()) 
{ targets_unsigned["custom"]["correlationId"] = correlation_id_; } Utils::writeFile(staging, Utils::jsonToCanonicalStr(targets_unsigned)); diff --git a/src/uptane_generator/director_repo.h b/src/uptane_generator/director_repo.h index 54f51019aa..e84acd4d88 100644 --- a/src/uptane_generator/director_repo.h +++ b/src/uptane_generator/director_repo.h @@ -8,7 +8,7 @@ class DirectorRepo : public Repo { DirectorRepo(boost::filesystem::path path, const std::string &expires, std::string correlation_id) : Repo(Uptane::RepositoryType::Director(), std::move(path), expires, std::move(correlation_id)) {} void addTarget(const std::string &target_name, const Json::Value &target, const std::string &hardware_id, - const std::string &ecu_serial, const std::string &url); + const std::string &ecu_serial, const std::string &url = "", const std::string &expires = ""); void revokeTargets(const std::vector &targets_to_remove); void signTargets(); void emptyTargets(); diff --git a/src/uptane_generator/image_repo.cc b/src/uptane_generator/image_repo.cc index d8ec87c917..c63dc1a573 100644 --- a/src/uptane_generator/image_repo.cc +++ b/src/uptane_generator/image_repo.cc @@ -1,5 +1,10 @@ #include "image_repo.h" +#include + +#include "crypto/crypto.h" +#include "utilities/utils.h" + void ImageRepo::addImage(const std::string &name, Json::Value &target, const std::string &hardware_id, const Delegation &delegation) { boost::filesystem::path repo_dir(path_ / ImageRepo::dir); @@ -19,42 +24,55 @@ void ImageRepo::addImage(const std::string &name, Json::Value &target, const std } void ImageRepo::addBinaryImage(const boost::filesystem::path &image_path, const boost::filesystem::path &targetname, - const std::string &hardware_id, const std::string &url, const Delegation &delegation) { + const std::string &hardware_id, const std::string &url, const int32_t custom_version, + const Delegation &delegation, const Json::Value &custom) { boost::filesystem::path repo_dir(path_ / ImageRepo::dir); 
boost::filesystem::path targets_path = repo_dir / "targets"; auto targetname_dir = targetname.parent_path(); boost::filesystem::create_directories(targets_path / targetname_dir); - boost::filesystem::copy_file(image_path, targets_path / targetname_dir / image_path.filename(), + boost::filesystem::copy_file(image_path, targets_path / targetname_dir / targetname.filename(), boost::filesystem::copy_option::overwrite_if_exists); std::string image = Utils::readFile(image_path); Json::Value target; target["length"] = Json::UInt64(image.size()); - target["hashes"]["sha256"] = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(image))); - target["hashes"]["sha512"] = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(image))); - target["custom"]["targetFormat"] = "BINARY"; + target["hashes"]["sha256"] = Crypto::sha256digestHex(image); + target["hashes"]["sha512"] = Crypto::sha512digestHex(image); + target["custom"] = custom; + if (!target["custom"].isMember("targetFormat")) { + target["custom"]["targetFormat"] = "BINARY"; + } if (!url.empty()) { target["custom"]["uri"] = url; } + if (custom_version != 0) { + target["custom"]["version"] = custom_version; + } addImage(targetname.string(), target, hardware_id, delegation); } -void ImageRepo::addCustomImage(const std::string &name, const Uptane::Hash &hash, const uint64_t length, - const std::string &hardware_id, const std::string &url, const Delegation &delegation, - const Json::Value &custom) { +void ImageRepo::addCustomImage(const std::string &name, const Hash &hash, const uint64_t length, + const std::string &hardware_id, const std::string &url, const int32_t custom_version, + const Delegation &delegation, const Json::Value &custom) { Json::Value target; target["length"] = Json::UInt(length); - if (hash.type() == Uptane::Hash::Type::kSha256) { + if (hash.type() == Hash::Type::kSha256) { target["hashes"]["sha256"] = hash.HashString(); - } else if (hash.type() == 
Uptane::Hash::Type::kSha512) { + } else if (hash.type() == Hash::Type::kSha512) { target["hashes"]["sha512"] = hash.HashString(); } target["custom"] = custom; + if (!target["custom"].isMember("targetFormat")) { + target["custom"]["targetFormat"] = "OSTREE"; + } if (!url.empty()) { target["custom"]["uri"] = url; } + if (custom_version != 0) { + target["custom"]["version"] = custom_version; + } addImage(name, target, hardware_id, delegation); } @@ -107,6 +125,7 @@ void ImageRepo::addDelegation(const Uptane::Role &name, const Uptane::Role &pare updateRepo(); } +// NOLINTNEXTLINE(misc-no-recursion) void ImageRepo::removeDelegationRecursive(const Uptane::Role &name, const Uptane::Role &parent_name) { boost::filesystem::path repo_dir(path_ / ImageRepo::dir); if (parent_name.IsDelegation()) { @@ -154,7 +173,7 @@ std::vector ImageRepo::getDelegationTargets(const Uptane::Role &nam std::vector result; boost::filesystem::path repo_dir(path_ / ImageRepo::dir); auto targets = Utils::parseJSONFile((repo_dir / "delegations") / (name.ToString() + ".json"))["signed"]["targets"]; - for (Json::ValueIterator it = targets.begin(); it != targets.end(); ++it) { + for (auto it = targets.begin(); it != targets.end(); ++it) { result.push_back(it.key().asString()); } return result; diff --git a/src/uptane_generator/image_repo.h b/src/uptane_generator/image_repo.h index 1248d4468c..641785593a 100644 --- a/src/uptane_generator/image_repo.h +++ b/src/uptane_generator/image_repo.h @@ -8,9 +8,10 @@ class ImageRepo : public Repo { ImageRepo(boost::filesystem::path path, const std::string &expires, std::string correlation_id) : Repo(Uptane::RepositoryType::Image(), std::move(path), expires, std::move(correlation_id)) {} void addBinaryImage(const boost::filesystem::path &image_path, const boost::filesystem::path &targetname, - const std::string &hardware_id, const std::string &url, const Delegation &delegation = {}); - void addCustomImage(const std::string &name, const Uptane::Hash &hash, uint64_t 
length, - const std::string &hardware_id, const std::string &url, const Delegation &delegation, + const std::string &hardware_id, const std::string &url = "", int32_t custom_version = 0, + const Delegation &delegation = {}, const Json::Value &custom = {}); + void addCustomImage(const std::string &name, const Hash &hash, uint64_t length, const std::string &hardware_id, + const std::string &url = "", int32_t custom_version = 0, const Delegation &delegation = {}, const Json::Value &custom = {}); void addDelegation(const Uptane::Role &name, const Uptane::Role &parent_role, const std::string &path, bool terminating, KeyType key_type); diff --git a/src/uptane_generator/main.cc b/src/uptane_generator/main.cc index 544ddff71d..97204e58aa 100644 --- a/src/uptane_generator/main.cc +++ b/src/uptane_generator/main.cc @@ -8,6 +8,8 @@ #include "logging/logging.h" #include "uptane_repo.h" +#include "utilities/aktualizr_version.h" +#include "utilities/utils.h" namespace po = boost::program_options; @@ -19,20 +21,35 @@ KeyType parseKeyType(const po::variables_map &vm) { return key_type; } +void check_info_options(const po::options_description &description, const po::variables_map &vm) { + if (vm.count("help") != 0 || (vm.count("command") == 0 && vm.count("version") == 0)) { + std::cout << description << '\n'; + exit(EXIT_SUCCESS); + } + if (vm.count("version") != 0) { + std::cout << "Current uptane-generator version is: " << aktualizr_version() << "\n"; + exit(EXIT_SUCCESS); + } +} + int main(int argc, char **argv) { po::options_description desc("uptane-generator command line options"); // clang-format off desc.add_options() ("help,h", "print usage") + ("version,v", "Current uptane-generator version") ("command", po::value(), "generate: \tgenerate a new repository\n" - "adddelegation: \tadd a delegated role to the images metadata\n" - "revokedelegation: \tremove delegated role from the images metadata and all signed targets of this role\n" - "image: \tadd a target to the images 
metadata\n" - "addtarget: \tprepare director targets metadata for a given device\n" - "signtargets: \tsign the staged director targets metadata\n" - "emptytargets: \tclear the staged director targets metadata\n" - "oldtargets: \tfill the staged director targets metadata with what is currently signed\n" - "sign: \tsign arbitrary metadata with repo keys") + "adddelegation: \tadd a delegated role to the Image repo metadata\n" + "revokedelegation: \tremove delegated role from the Image repo metadata and all signed targets of this role\n" + "image: \tadd a target to the Image repo metadata\n" + "addtarget: \tprepare Director Targets metadata for a given device\n" + "signtargets: \tsign the staged Director Targets metadata\n" + "emptytargets: \tclear the staged Director Targets metadata\n" + "oldtargets: \tfill the staged Director Targets metadata with what is currently signed\n" + "sign: \tsign arbitrary metadata with repo keys\n" + "addcampaigns: \tgenerate campaigns json\n" + "refresh: \trefresh a metadata object (bump the version)\n" + "rotate: \trotate a Root metadata key") ("path", po::value(), "path to the repository") ("filename", po::value(), "path to the image") ("hwid", po::value(), "target hardware identifier") @@ -43,7 +60,7 @@ int main(int argc, char **argv) { ("keyname", po::value(), "name of key's role") ("repotype", po::value(), "director|image") ("correlationid", po::value()->default_value(""), "correlation id") - ("keytype", po::value()->default_value("RSA2048"), "UPTANE key type") + ("keytype", po::value()->default_value("RSA2048"), "Uptane key type") ("targetname", po::value(), "target's name (if different than filename)") ("targetsha256", po::value(), "target's SHA256 hash (for adding metadata without an actual file)") ("targetsha512", po::value(), "target's SHA512 hash (for adding metadata without an actual file)") @@ -52,8 +69,8 @@ int main(int argc, char **argv) { ("dterm", po::bool_switch(), "if the created delegated role is terminating") 
("dparent", po::value()->default_value("targets"), "delegated role parent name") ("dpattern", po::value(), "delegated file path pattern") - ("url", po::value(), "custom download URL"); - + ("url", po::value(), "custom download URL") + ("customversion", po::value(), "custom version"); // clang-format on po::positional_options_description positionalOptions; @@ -68,11 +85,8 @@ int main(int argc, char **argv) { po::basic_parsed_options parsed_options = po::command_line_parser(argc, argv).options(desc).positional(positionalOptions).run(); po::store(parsed_options, vm); + check_info_options(desc, vm); po::notify(vm); - if (vm.count("help") != 0) { - std::cout << desc << std::endl; - exit(EXIT_SUCCESS); - } if (vm.count("command") != 0 && vm.count("path") != 0) { std::string expiration_time; @@ -120,35 +134,40 @@ int main(int argc, char **argv) { if (vm.count("url") != 0) { url = vm["url"].as(); } + int32_t custom_version{0}; + if (vm.count("customversion") != 0) { + custom_version = vm["customversion"].as(); + } + Json::Value custom; + if (vm.count("targetcustom") > 0 && vm.count("targetformat") > 0) { + std::cerr << "--targetcustom and --targetformat cannot be used together"; + exit(EXIT_FAILURE); + } + if (vm.count("targetcustom") > 0) { + std::ifstream custom_file(vm["targetcustom"].as().c_str()); + custom_file >> custom; + } else if (vm.count("targetformat") > 0) { + custom = Json::Value(); + custom["targetFormat"] = vm["targetformat"].as(); + } if (vm.count("filename") > 0) { - repo.addImage(vm["filename"].as(), targetname, hwid, url, delegation); - std::cout << "Added a target " << targetname << " to the images metadata" << std::endl; + repo.addImage(vm["filename"].as(), targetname, hwid, url, custom_version, delegation, + custom); + std::cout << "Added a target " << targetname << " to the Image repo metadata" << std::endl; } else { if ((vm.count("targetsha256") == 0 && vm.count("targetsha512") == 0) || vm.count("targetlength") == 0) { std::cerr << "image command 
requires --targetsha256 or --targetsha512, and --targetlength when --filename " "is not supplied.\n"; exit(EXIT_FAILURE); } - std::unique_ptr hash; + std::unique_ptr hash; if (vm.count("targetsha256") > 0) { - hash = std_::make_unique(Uptane::Hash::Type::kSha256, vm["targetsha256"].as()); + hash = std_::make_unique(Hash::Type::kSha256, vm["targetsha256"].as()); } else { - hash = std_::make_unique(Uptane::Hash::Type::kSha512, vm["targetsha512"].as()); - } - Json::Value custom; - if (vm.count("targetcustom") > 0 && vm.count("targetformat") > 0) { - std::cerr << "--targetcustom and --targetformat cannot be used together"; - exit(EXIT_FAILURE); - } - if (vm.count("targetcustom") > 0) { - std::ifstream custom_file(vm["targetcustom"].as().c_str()); - custom_file >> custom; - } else if (vm.count("targetformat") > 0) { - custom = Json::Value(); - custom["targetFormat"] = vm["targetformat"].as(); + hash = std_::make_unique(Hash::Type::kSha512, vm["targetsha512"].as()); } - repo.addCustomImage(targetname.string(), *hash, vm["targetlength"].as(), hwid, url, delegation, - custom); + repo.addCustomImage(targetname.string(), *hash, vm["targetlength"].as(), hwid, url, custom_version, + delegation, custom); std::cout << "Added a custom image target " << targetname.string() << std::endl; } } else if (command == "addtarget") { @@ -163,8 +182,8 @@ int main(int argc, char **argv) { if (vm.count("url") != 0) { url = vm["url"].as(); } - repo.addTarget(targetname, hwid, serial, url); - std::cout << "Added target " << targetname << " to director targets metadata for ECU with serial " << serial + repo.addTarget(targetname, hwid, serial, url, expiration_time); + std::cout << "Added target " << targetname << " to Director Targets metadata for ECU with serial " << serial << " and hardware ID " << hwid << std::endl; } else if (command == "adddelegation") { if (vm.count("dname") == 0 || vm.count("dpattern") == 0) { @@ -174,10 +193,10 @@ int main(int argc, char **argv) { std::string dparent = 
vm["dparent"].as(); std::string dpattern = vm["dpattern"].as(); KeyType key_type = parseKeyType(vm); - repo.addDelegation(Uptane::Role(dname, true), Uptane::Role(dparent, dparent != "targets"), - vm["dpattern"].as(), vm["dterm"].as(), key_type); - std::cout << "Added a delegated role " << dname << " with dpattern " << dpattern << " to the images metadata" - << std::endl; + repo.addDelegation(Uptane::Role(dname, true), Uptane::Role(dparent, dparent != "targets"), dpattern, + vm["dterm"].as(), key_type); + std::cout << "Added a delegated role " << dname << " with dpattern " << dpattern + << " to the Image repo metadata" << std::endl; } else if (command == "revokedelegation") { if (vm.count("dname") == 0) { std::cerr << "revokedelegation command requires --dname\n"; @@ -187,13 +206,13 @@ int main(int argc, char **argv) { std::cout << "Revoked the delegation " << dname << std::endl; } else if (command == "signtargets") { repo.signTargets(); - std::cout << "Signed the staged director targets metadata" << std::endl; + std::cout << "Signed the staged Director Targets metadata" << std::endl; } else if (command == "emptytargets") { repo.emptyTargets(); - std::cout << "Cleared the staged director targets metadata" << std::endl; + std::cout << "Cleared the staged Director Targets metadata" << std::endl; } else if (command == "oldtargets") { repo.oldTargets(); - std::cout << "Populated the director targets metadata with the currently signed metadata" << std::endl; + std::cout << "Populated the Director Targets metadata with the currently signed metadata" << std::endl; } else if (command == "sign") { if (vm.count("repotype") == 0 || vm.count("keyname") == 0) { std::cerr << "sign command requires --repotype and --keyname\n"; @@ -208,13 +227,30 @@ int main(int argc, char **argv) { correlation_id); auto json_to_sign = Utils::parseJSON(text_to_sign); - if (json_to_sign == Json::nullValue) { + if (json_to_sign.empty()) { std::cerr << "Text to sign must be valid json\n"; 
exit(EXIT_FAILURE); } auto json_signed = base_repo.signTuf(Uptane::Role(vm["keyname"].as()), json_to_sign); std::cout << Utils::jsonToCanonicalStr(json_signed); + } else if (command == "addcampaigns") { + repo.generateCampaigns(); + std::cout << "Generated campaigns" << std::endl; + } else if (command == "refresh") { + if (vm.count("repotype") == 0 || vm.count("keyname") == 0) { + std::cerr << "refresh command requires --repotype and --keyname\n"; + exit(EXIT_FAILURE); + } + repo.refresh(Uptane::RepositoryType(vm["repotype"].as()), + Uptane::Role(vm["keyname"].as())); + } else if (command == "rotate") { + if (vm.count("repotype") == 0) { + std::cerr << "rotate command requires --repotype\n"; + exit(EXIT_FAILURE); + } + KeyType key_type = parseKeyType(vm); + repo.rotate(Uptane::RepositoryType(vm["repotype"].as()), Uptane::Role::Root(), key_type); + } else { + std::cout << desc << std::endl; + exit(EXIT_FAILURE); diff --git a/src/uptane_generator/repo.cc b/src/uptane_generator/repo.cc index e6e4dc3c54..9d21daf8ec 100644 --- a/src/uptane_generator/repo.cc +++ b/src/uptane_generator/repo.cc @@ -1,29 +1,29 @@ +#include "repo.h" + +#include +#include #include #include -#include "crypto/crypto.h" -#include "logging/logging.h" +#include "crypto/crypto.h" #include "director_repo.h" #include "image_repo.h" -#include "repo.h" +#include "libaktualizr/campaign.h" Repo::Repo(Uptane::RepositoryType repo_type, boost::filesystem::path path, const std::string &expires, std::string correlation_id) : repo_type_(repo_type), path_(std::move(path)), correlation_id_(std::move(correlation_id)) { expiration_time_ = getExpirationTime(expires); - if (boost::filesystem::exists(path_)) { - if (boost::filesystem::directory_iterator(path_) != boost::filesystem::directory_iterator()) { - readKeys(); - } - } + readKeys(); - if (repo_type == Uptane::RepositoryType("director")) { + repo_dir_ = path_ / DirectorRepo::dir; - } else if (repo_type == 
Uptane::RepositoryType("image")) { + } else if (repo_type_ == Uptane::RepositoryType::Image()) { repo_dir_ = path_ / ImageRepo::dir; } } +// NOLINTNEXTLINE(misc-no-recursion) void Repo::addDelegationToSnapshot(Json::Value *snapshot, const Uptane::Role &role) { boost::filesystem::path repo_dir = repo_dir_; if (role.IsDelegation()) { @@ -35,51 +35,60 @@ void Repo::addDelegationToSnapshot(Json::Value *snapshot, const Uptane::Role &ro std::string signed_role = Utils::readFile(repo_dir / role_file_name); (*snapshot)["meta"][role_file_name]["version"] = role_json["version"].asUInt(); + (*snapshot)["meta"][role_file_name]["length"] = signed_role.size(); + (*snapshot)["meta"][role_file_name]["hashes"]["sha256"] = Crypto::sha256digestHex(signed_role); if (role_json["delegations"].isObject()) { auto delegations_list = role_json["delegations"]["roles"]; - for (Json::ValueIterator it = delegations_list.begin(); it != delegations_list.end(); it++) { + for (auto it = delegations_list.begin(); it != delegations_list.end(); it++) { addDelegationToSnapshot(snapshot, Uptane::Role((*it)["name"].asString(), true)); } } } void Repo::updateRepo() { - boost::filesystem::path repo_dir = repo_dir_; - Json::Value old_snapshot = Utils::parseJSONFile(repo_dir / "snapshot.json")["signed"]; - + const Json::Value old_snapshot = Utils::parseJSONFile(repo_dir_ / "snapshot.json")["signed"]; Json::Value snapshot; snapshot["_type"] = "Snapshot"; snapshot["expires"] = old_snapshot["expires"]; snapshot["version"] = (old_snapshot["version"].asUInt()) + 1; - Json::Value root = Utils::parseJSONFile(repo_dir / "root.json")["signed"]; - std::string signed_root = Utils::readFile(repo_dir / "root.json"); - + const Json::Value root = Utils::parseJSONFile(repo_dir_ / "root.json")["signed"]; snapshot["meta"]["root.json"]["version"] = root["version"].asUInt(); addDelegationToSnapshot(&snapshot, Uptane::Role::Targets()); - std::string signed_snapshot = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Snapshot(), 
snapshot)); - Utils::writeFile(repo_dir / "snapshot.json", signed_snapshot); + const std::string signed_snapshot = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Snapshot(), snapshot)); + Utils::writeFile(repo_dir_ / "snapshot.json", signed_snapshot); - Json::Value timestamp = Utils::parseJSONFile(repo_dir / "timestamp.json")["signed"]; + Json::Value timestamp = Utils::parseJSONFile(repo_dir_ / "timestamp.json")["signed"]; timestamp["version"] = (timestamp["version"].asUInt()) + 1; - timestamp["meta"]["snapshot.json"]["hashes"]["sha256"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(signed_snapshot))); - timestamp["meta"]["snapshot.json"]["hashes"]["sha512"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(signed_snapshot))); + timestamp["meta"]["snapshot.json"]["hashes"]["sha256"] = Crypto::sha256digestHex(signed_snapshot); + timestamp["meta"]["snapshot.json"]["hashes"]["sha512"] = Crypto::sha512digestHex(signed_snapshot); timestamp["meta"]["snapshot.json"]["length"] = static_cast(signed_snapshot.length()); timestamp["meta"]["snapshot.json"]["version"] = snapshot["version"].asUInt(); - Utils::writeFile(repo_dir / "timestamp.json", + Utils::writeFile(repo_dir_ / "timestamp.json", Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Timestamp(), timestamp))); } Json::Value Repo::signTuf(const Uptane::Role &role, const Json::Value &json) { auto key = keys_[role]; - std::string b64sig = - Utils::toBase64(Crypto::Sign(key.public_key.Type(), nullptr, key.private_key, Json::FastWriter().write(json))); + return signTuf(key, json); +} + +Json::Value Repo::signTuf(const KeyPair &key, const Json::Value &json) { + bool append = json.isMember("signed") && json.isMember("signatures"); + Json::Value json_to_sign; + Json::Value json_signed; + if (append) { + json_to_sign = json["signed"]; + json_signed = json; + } else { + json_to_sign = json; + } + std::string b64sig = Utils::toBase64( + 
Crypto::Sign(key.public_key.Type(), nullptr, key.private_key, Utils::jsonToCanonicalStr(json_to_sign))); Json::Value signature; switch (key.public_key.Type()) { case KeyType::kRSA2048: @@ -94,17 +103,17 @@ Json::Value Repo::signTuf(const Uptane::Role &role, const Json::Value &json) { throw std::runtime_error("Unknown key type"); } signature["sig"] = b64sig; - - Json::Value signed_data; signature["keyid"] = key.public_key.KeyId(); - signed_data["signed"] = json; - signed_data["signatures"].append(signature); - return signed_data; + if (!append) { + json_signed["signed"] = json_to_sign; + } + json_signed["signatures"].append(signature); + return json_signed; } std::string Repo::getExpirationTime(const std::string &expires) { - if (expires.size() != 0) { + if (!expires.empty()) { std::smatch match; std::regex time_pattern("\\d{4}\\-\\d{2}\\-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z"); // NOLINT(modernize-raw-string-literal) if (!std::regex_match(expires, time_pattern)) { @@ -117,17 +126,20 @@ std::string Repo::getExpirationTime(const std::string &expires) { time(&raw_time); gmtime_r(&raw_time, &time_struct); time_struct.tm_year += 3; - char formatted[22]; - strftime(formatted, 22, "%Y-%m-%dT%H:%M:%SZ", &time_struct); - return formatted; + std::array formatted{}; + strftime(formatted.data(), formatted.size(), "%Y-%m-%dT%H:%M:%SZ", &time_struct); + return formatted.data(); } } void Repo::generateKeyPair(KeyType key_type, const Uptane::Role &key_name) { - boost::filesystem::path keys_dir = path_ / ("keys/" + repo_type_.toString() + "/" + key_name.ToString()); + boost::filesystem::path keys_dir = path_ / ("keys/" + repo_type_.ToString() + "/" + key_name.ToString()); boost::filesystem::create_directories(keys_dir); - std::string public_key_string, private_key; - Crypto::generateKeyPair(key_type, &public_key_string, &private_key); + std::string public_key_string; + std::string private_key; + if (!Crypto::generateKeyPair(key_type, &public_key_string, &private_key)) { + throw 
std::runtime_error("Key generation failure"); + } PublicKey public_key(public_key_string, key_type); std::stringstream key_str; @@ -177,7 +189,7 @@ void Repo::generateRepo(KeyType key_type) { role["keyids"].append(keys_[Uptane::Role::Timestamp()].public_key.KeyId()); root["roles"]["timestamp"] = role; - std::string signed_root = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Root(), root)); + const std::string signed_root = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Root(), root)); Utils::writeFile(repo_dir_ / "root.json", signed_root); Utils::writeFile(repo_dir_ / "1.root.json", signed_root); @@ -186,20 +198,18 @@ void Repo::generateRepo(KeyType key_type) { targets["expires"] = expiration_time_; targets["version"] = 1; targets["targets"] = Json::objectValue; - if (repo_type_ == Uptane::RepositoryType::Director() && correlation_id_ != "") { + if (repo_type_ == Uptane::RepositoryType::Director() && !correlation_id_.empty()) { targets["custom"]["correlationId"] = correlation_id_; } - std::string signed_targets = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Targets(), targets)); + const std::string signed_targets = Utils::jsonToCanonicalStr(signTuf(Uptane::Role::Targets(), targets)); Utils::writeFile(repo_dir_ / "targets.json", signed_targets); Json::Value snapshot; snapshot["_type"] = "Snapshot"; snapshot["expires"] = expiration_time_; snapshot["version"] = 1; - snapshot["meta"]["root.json"]["hashes"]["sha256"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(signed_root))); - snapshot["meta"]["root.json"]["hashes"]["sha512"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(signed_root))); + snapshot["meta"]["root.json"]["hashes"]["sha256"] = Crypto::sha256digestHex(signed_root); + snapshot["meta"]["root.json"]["hashes"]["sha512"] = Crypto::sha512digestHex(signed_root); snapshot["meta"]["root.json"]["length"] = static_cast(signed_root.length()); snapshot["meta"]["root.json"]["version"] = 1; 
snapshot["meta"]["targets.json"]["version"] = 1; @@ -210,10 +220,8 @@ void Repo::generateRepo(KeyType key_type) { timestamp["_type"] = "Timestamp"; timestamp["expires"] = expiration_time_; timestamp["version"] = 1; - timestamp["meta"]["snapshot.json"]["hashes"]["sha256"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(signed_snapshot))); - timestamp["meta"]["snapshot.json"]["hashes"]["sha512"] = - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(signed_snapshot))); + timestamp["meta"]["snapshot.json"]["hashes"]["sha256"] = Crypto::sha256digestHex(signed_snapshot); + timestamp["meta"]["snapshot.json"]["hashes"]["sha512"] = Crypto::sha512digestHex(signed_snapshot); timestamp["meta"]["snapshot.json"]["length"] = static_cast(signed_snapshot.length()); timestamp["meta"]["snapshot.json"]["version"] = 1; Utils::writeFile(repo_dir_ / "timestamp.json", @@ -223,6 +231,25 @@ void Repo::generateRepo(KeyType key_type) { } } +void Repo::generateCampaigns() const { + std::vector campaigns; + campaigns.resize(1); + auto &c = campaigns[0]; + + c.name = "campaign1"; + c.id = "c2eb7e8d-8aa0-429d-883f-5ed8fdb2a493"; + c.size = 62470; + c.autoAccept = true; + c.description = "this is my message to show on the device"; + c.estInstallationDuration = 10; + c.estPreparationDuration = 20; + + Json::Value json; + campaign::Campaign::JsonFromCampaigns(campaigns, json); + + Utils::writeFile(path_ / "campaigns.json", Utils::jsonToCanonicalStr(json)); +} + Json::Value Repo::getTarget(const std::string &target_name) { const Json::Value image_targets = Utils::parseJSONFile(repo_dir_ / "targets.json")["signed"]; if (image_targets["targets"].isMember(target_name)) { @@ -245,7 +272,10 @@ Json::Value Repo::getTarget(const std::string &target_name) { } void Repo::readKeys() { - auto keys_path = path_ / "keys" / repo_type_.toString(); + auto keys_path = path_ / "keys" / repo_type_.ToString(); + if (!boost::filesystem::exists(keys_path)) { + 
return; + } for (auto &p : boost::filesystem::directory_iterator(keys_path)) { std::string public_key_string = Utils::readFile(p / "public.key"); std::istringstream key_type_str(Utils::readFile(p / "key_type")); @@ -258,6 +288,98 @@ void Repo::readKeys() { } } +void Repo::refresh(const Uptane::Role &role) { + if (repo_type_ == Uptane::RepositoryType::Director() && + (role == Uptane::Role::Timestamp() || role == Uptane::Role::Snapshot())) { + throw std::runtime_error("The " + role.ToString() + " in the Director repo is not currently supported."); + } + + boost::filesystem::path meta_path = repo_dir_; + if (role == Uptane::Role::Root()) { + meta_path /= "root.json"; + } else if (role == Uptane::Role::Timestamp()) { + meta_path /= "timestamp.json"; + } else if (role == Uptane::Role::Snapshot()) { + meta_path /= "snapshot.json"; + } else if (role == Uptane::Role::Targets()) { + meta_path /= "targets.json"; + } else { + throw std::runtime_error("Refreshing custom role " + role.ToString() + " is not currently supported."); + } + + // The only interesting part here is to increment the version. It could be + // interesting to allow changing the expiry, too. + Json::Value meta_raw = Utils::parseJSONFile(meta_path)["signed"]; + const unsigned version = meta_raw["version"].asUInt() + 1; + + auto current_expire_time = TimeStamp(meta_raw["expires"].asString()); + + if (current_expire_time.IsExpiredAt(TimeStamp::Now())) { + time_t new_expiration_time; + std::time(&new_expiration_time); + new_expiration_time += 60 * 60; // make it valid for the next hour + struct tm new_expiration_time_str {}; + gmtime_r(&new_expiration_time, &new_expiration_time_str); + + meta_raw["expires"] = TimeStamp(new_expiration_time_str).ToString(); + } + meta_raw["version"] = version; + const std::string signed_meta = Utils::jsonToCanonicalStr(signTuf(role, meta_raw)); + Utils::writeFile(meta_path, signed_meta); + + // Write a new numbered version of the Root if relevant. 
+ if (role == Uptane::Role::Root()) { + std::stringstream root_name; + root_name << version << ".root.json"; + Utils::writeFile(repo_dir_ / root_name.str(), signed_meta); + } + + updateRepo(); +} + +void Repo::rotate(const Uptane::Role &role, KeyType key_type) { + if (role != Uptane::Role::Root()) { + throw std::runtime_error("Rotating the " + role.ToString() + " is not currently supported."); + } + + boost::filesystem::path meta_path = repo_dir_ / "root.json"; + Json::Value meta_raw = Utils::parseJSONFile(meta_path)["signed"]; + const unsigned version = meta_raw["version"].asUInt() + 1; + + auto current_expire_time = TimeStamp(meta_raw["expires"].asString()); + + if (current_expire_time.IsExpiredAt(TimeStamp::Now())) { + time_t new_expiration_time; + std::time(&new_expiration_time); + new_expiration_time += 60 * 60; // make it valid for the next hour + struct tm new_expiration_time_str {}; + gmtime_r(&new_expiration_time, &new_expiration_time_str); + + meta_raw["expires"] = TimeStamp(new_expiration_time_str).ToString(); + } + meta_raw["version"] = version; + + KeyPair old_key = keys_[role]; + generateKeyPair(key_type, role); + KeyPair new_key = keys_[role]; + meta_raw["keys"][new_key.public_key.KeyId()] = new_key.public_key.ToUptane(); + meta_raw["keys"].removeMember(old_key.public_key.KeyId()); + + meta_raw["roles"]["root"]["keyids"].clear(); + meta_raw["roles"]["root"]["keyids"].append(new_key.public_key.KeyId()); + + // Sign Root with old and new key + auto intermediate_meta = signTuf(role, meta_raw); + const std::string signed_meta = Utils::jsonToCanonicalStr(signTuf(old_key, intermediate_meta)); + Utils::writeFile(meta_path, signed_meta); + + std::stringstream root_name; + root_name << version << ".root.json"; + Utils::writeFile(repo_dir_ / root_name.str(), signed_meta); + + updateRepo(); +} + Delegation::Delegation(const boost::filesystem::path &repo_path, std::string delegation_name) : name(std::move(delegation_name)) { if (Uptane::Role::IsReserved(name)) { 
@@ -276,6 +398,7 @@ Delegation::Delegation(const boost::filesystem::path &repo_path, std::string del } } +// NOLINTNEXTLINE(misc-no-recursion) std::string Delegation::findPatternInTree(const boost::filesystem::path &repo_path, const std::string &name, const Json::Value &targets_json) { Json::Value delegations = targets_json["delegations"]; diff --git a/src/uptane_generator/repo.h b/src/uptane_generator/repo.h index 1384f8dfbb..4cc4c47f4c 100644 --- a/src/uptane_generator/repo.h +++ b/src/uptane_generator/repo.h @@ -2,10 +2,12 @@ #define REPO_H_ #include +#include +#include +#include -#include -#include #include "json/json.h" +#include "libaktualizr/types.h" #include "uptane/tuf.h" struct KeyPair { @@ -19,10 +21,10 @@ struct KeyPair { struct Delegation { Delegation() = default; Delegation(const boost::filesystem::path &repo_path, std::string delegation_name); - bool isMatched(const boost::filesystem::path &image_path) { + bool isMatched(const boost::filesystem::path &image_path) const { return (fnmatch(pattern.c_str(), image_path.c_str(), 0) == 0); } - operator bool() const { return (!name.empty() && !pattern.empty()); } + explicit operator bool() const { return (!name.empty() && !pattern.empty()); } std::string name; std::string pattern; @@ -38,11 +40,14 @@ class Repo { void generateRepo(KeyType key_type = KeyType::kRSA2048); Json::Value getTarget(const std::string &target_name); Json::Value signTuf(const Uptane::Role &role, const Json::Value &json); + void generateCampaigns() const; + void refresh(const Uptane::Role &role); + void rotate(const Uptane::Role &role, KeyType key_type = KeyType::kRSA2048); protected: void generateRepoKeys(KeyType key_type); void generateKeyPair(KeyType key_type, const Uptane::Role &key_name); - std::string getExpirationTime(const std::string &expires); + static std::string getExpirationTime(const std::string &expires); void readKeys(); void updateRepo(); Uptane::RepositoryType repo_type_; @@ -54,6 +59,7 @@ class Repo { private: void 
addDelegationToSnapshot(Json::Value *snapshot, const Uptane::Role &role); + static Json::Value signTuf(const KeyPair &key, const Json::Value &json); }; #endif // REPO_H_ diff --git a/src/uptane_generator/repo_test.cc b/src/uptane_generator/repo_test.cc index c69884c8e5..3183d2730c 100644 --- a/src/uptane_generator/repo_test.cc +++ b/src/uptane_generator/repo_test.cc @@ -5,37 +5,59 @@ #include -#include "config/config.h" +#include "crypto/crypto.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "test_utils.h" +#include "uptane/exceptions.h" #include "uptane_repo.h" KeyType key_type = KeyType::kED25519; std::string generate_repo_exec; -void check_repo(boost::filesystem::path repo_dir) { - Json::Value targets = Utils::parseJSONFile(repo_dir / "targets.json")["signed"]; - std::string signed_targets = Utils::readFile(repo_dir / "targets.json"); - - Json::Value snapshot = Utils::parseJSONFile(repo_dir / "snapshot.json")["signed"]; - EXPECT_EQ(snapshot["meta"]["targets.json"]["version"].asUInt(), targets["version"].asUInt()); - - auto signed_snapshot = Utils::readFile(repo_dir / "snapshot.json"); - Json::Value timestamp = Utils::parseJSONFile(repo_dir / "timestamp.json")["signed"]; - EXPECT_EQ(timestamp["meta"]["snapshot.json"]["hashes"]["sha256"].asString(), - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(signed_snapshot)))); - EXPECT_EQ(timestamp["meta"]["snapshot.json"]["hashes"]["sha512"].asString(), - boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(signed_snapshot)))); - EXPECT_EQ(timestamp["meta"]["snapshot.json"]["length"].asUInt(), static_cast(signed_snapshot.length())); - EXPECT_EQ(timestamp["meta"]["snapshot.json"]["version"].asUInt(), snapshot["version"].asUInt()); +// TODO: this could be a lot more robust. 
+void check_repo(const TemporaryDirectory &temp_dir, const Uptane::RepositoryType repo_type) { + boost::filesystem::path repo_dir; + if (repo_type == Uptane::RepositoryType::Director()) { + repo_dir = temp_dir.Path() / DirectorRepo::dir; + } else { + repo_dir = temp_dir.Path() / ImageRepo::dir; + } + + const auto root_raw = Utils::readFile(repo_dir / "root.json"); + auto root = Uptane::Root(repo_type, Utils::parseJSON(root_raw)); // initialization and format check + root = Uptane::Root(repo_type, Utils::parseJSON(root_raw), root); // signature verification against itself + const auto snapshot_raw = Utils::readFile(repo_dir / "snapshot.json"); + const auto snapshot = + Uptane::Snapshot(repo_type, Utils::parseJSON(snapshot_raw), std::make_shared(root)); + const auto timestamp_raw = Utils::readFile(repo_dir / "timestamp.json"); + const auto timestamp = + Uptane::TimestampMeta(repo_type, Utils::parseJSON(timestamp_raw), std::make_shared(root)); + const auto targets_raw = Utils::readFile(repo_dir / "targets.json"); + const auto targets = Uptane::Targets(repo_type, Uptane::Role::Targets(), Utils::parseJSON(targets_raw), + std::make_shared(root)); + // TODO: verify any delegations + + const Json::Value targets_signed = Utils::parseJSONFile(repo_dir / "targets.json")["signed"]; + const Json::Value snapshot_signed = Utils::parseJSONFile(repo_dir / "snapshot.json")["signed"]; + EXPECT_EQ(snapshot_signed["meta"]["targets.json"]["version"].asUInt(), targets_signed["version"].asUInt()); + + const Json::Value timestamp_signed = Utils::parseJSONFile(repo_dir / "timestamp.json")["signed"]; + EXPECT_EQ(timestamp_signed["meta"]["snapshot.json"]["hashes"]["sha256"].asString(), + Crypto::sha256digestHex(snapshot_raw)); + EXPECT_EQ(timestamp_signed["meta"]["snapshot.json"]["hashes"]["sha512"].asString(), + Crypto::sha512digestHex(snapshot_raw)); + EXPECT_EQ(timestamp_signed["meta"]["snapshot.json"]["length"].asUInt(), + static_cast(snapshot_raw.length())); + 
EXPECT_EQ(timestamp_signed["meta"]["snapshot.json"]["version"].asUInt(), snapshot_signed["version"].asUInt()); } void check_repo(const TemporaryDirectory &temp_dir) { - check_repo(temp_dir.Path() / ImageRepo::dir); - check_repo(temp_dir.Path() / DirectorRepo::dir); + check_repo(temp_dir, Uptane::RepositoryType::Image()); + check_repo(temp_dir, Uptane::RepositoryType::Director()); } /* - * Generate images and director repos. + * Generate Image and Director repos. */ TEST(uptane_generator, generate_repo) { TemporaryDirectory temp_dir; @@ -99,78 +121,105 @@ TEST(uptane_generator, generate_repo) { } /* - * Add an image to the images repo. + * Add an image to the Image repo. */ TEST(uptane_generator, add_image) { TemporaryDirectory temp_dir; UptaneRepo repo(temp_dir.Path(), "", ""); repo.generateRepo(key_type); repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", std::string(DirectorRepo::dir) + "/manifest", - "test-hw", "", {}); + "test-hw"); Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); EXPECT_FALSE( image_targets["signed"]["targets"][std::string(DirectorRepo::dir) + "/manifest"]["custom"].isMember("uri")); + EXPECT_FALSE( + image_targets["signed"]["targets"][std::string(DirectorRepo::dir) + "/manifest"]["custom"].isMember("version")); Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); EXPECT_EQ(director_targets["signed"]["targets"].size(), 0); check_repo(temp_dir); } /* - * Copy an image to the director repo. + * Copy an image to the Director repo. 
*/ TEST(uptane_generator, copy_image) { TemporaryDirectory temp_dir; UptaneRepo repo(temp_dir.Path(), "", ""); repo.generateRepo(key_type); - repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "", {}); - repo.addTarget("manifest", "test-hw", "test-serial", ""); + repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw"); + repo.addTarget("manifest", "test-hw", "test-serial"); repo.signTargets(); Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); EXPECT_FALSE(image_targets["signed"]["targets"]["manifest"]["custom"].isMember("uri")); + EXPECT_FALSE(image_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); EXPECT_EQ(director_targets["signed"]["targets"].size(), 1); EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("uri")); + EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); check_repo(temp_dir); } /* - * Add an image to the images repo with a custom URL. + * Add an image to the Image repo with a custom URL. 
*/ TEST(uptane_generator, image_custom_url) { TemporaryDirectory temp_dir; UptaneRepo repo(temp_dir.Path(), "", ""); repo.generateRepo(key_type); - repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "test-url", {}); - repo.addTarget("manifest", "test-hw", "test-serial", ""); + repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "test-url"); + repo.addTarget("manifest", "test-hw", "test-serial"); repo.signTargets(); Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); EXPECT_EQ(image_targets["signed"]["targets"]["manifest"]["custom"]["uri"], "test-url"); + EXPECT_FALSE(image_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); EXPECT_EQ(director_targets["signed"]["targets"].size(), 1); EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("uri")); + EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); check_repo(temp_dir); } /* - * Add an image to the images repo with a custom URL. - * Copy an image to the director repo with a custom URL. + * Add an image to the Image repo with a custom URL. + * Copy an image to the Director repo with a custom URL. 
*/ TEST(uptane_generator, both_custom_url) { TemporaryDirectory temp_dir; UptaneRepo repo(temp_dir.Path(), "", ""); repo.generateRepo(key_type); - repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "test-url", {}); + repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "test-url"); repo.addTarget("manifest", "test-hw", "test-serial", "test-url2"); repo.signTargets(); Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); EXPECT_EQ(image_targets["signed"]["targets"]["manifest"]["custom"]["uri"], "test-url"); + EXPECT_FALSE(image_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); EXPECT_EQ(director_targets["signed"]["targets"].size(), 1); EXPECT_EQ(director_targets["signed"]["targets"]["manifest"]["custom"]["uri"], "test-url2"); + EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); + check_repo(temp_dir); +} + +/* + * Add an image to the Image repo with a custom version. 
+ */ +TEST(uptane_generator, image_custom_version) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.addImage(temp_dir.Path() / DirectorRepo::dir / "manifest", "manifest", "test-hw", "", 42); + repo.addTarget("manifest", "test-hw", "test-serial"); + repo.signTargets(); + Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); + EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); + EXPECT_EQ(image_targets["signed"]["targets"]["manifest"]["custom"]["version"], 42); + Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); + EXPECT_EQ(director_targets["signed"]["targets"].size(), 1); + EXPECT_FALSE(director_targets["signed"]["targets"]["manifest"]["custom"].isMember("version")); check_repo(temp_dir); } @@ -343,7 +392,7 @@ TEST(uptane_generator, sign) { cmd = generate_repo_exec + " sign " + temp_dir.Path().string(); cmd += " --repotype director --keyname snapshot"; std::string sign_cmd = - "echo \"{\\\"_type\\\":\\\"Snapshot\\\",\\\"expires\\\":\\\"2021-07-04T16:33:27Z\\\"}\" | " + cmd; + "echo \"{\\\"_type\\\":\\\"Snapshot\\\",\\\"expires\\\":\\\"2025-07-04T16:33:27Z\\\"}\" | " + cmd; output.clear(); retval = Utils::shell(sign_cmd, &output); if (retval) { @@ -382,11 +431,12 @@ TEST(uptane_generator, image_custom) { EXPECT_EQ(image_targets["signed"]["targets"].size(), 1); EXPECT_EQ(image_targets["signed"]["targets"]["target1"]["length"].asUInt(), 123); EXPECT_FALSE(image_targets["signed"]["targets"]["target1"]["custom"].isMember("uri")); + EXPECT_FALSE(image_targets["signed"]["targets"]["target1"]["custom"].isMember("version")); check_repo(temp_dir); } /* - * Clear the staged director targets metadata. + * Clear the staged Director Targets metadata. 
*/ TEST(uptane_generator, emptytargets) { TemporaryDirectory temp_dir; @@ -432,18 +482,18 @@ TEST(uptane_generator, emptytargets) { } /* - * Populate the director targets metadata with the currently signed metadata. + * Populate the Director Targets metadata with the currently signed metadata. */ TEST(uptane_generator, oldtargets) { TemporaryDirectory temp_dir; UptaneRepo repo(temp_dir.Path(), "", ""); repo.generateRepo(key_type); - Uptane::Hash hash(Uptane::Hash::Type::kSha256, "8ab755c16de6ee9b6224169b36cbf0f2a545f859be385501ad82cdccc240d0a6"); - repo.addCustomImage("target1", hash, 123, "test-hw", ""); - repo.addCustomImage("target2", hash, 321, "test-hw", ""); - repo.addTarget("target1", "test-hw", "test-serial", ""); + Hash hash(Hash::Type::kSha256, "8ab755c16de6ee9b6224169b36cbf0f2a545f859be385501ad82cdccc240d0a6"); + repo.addCustomImage("target1", hash, 123, "test-hw"); + repo.addCustomImage("target2", hash, 321, "test-hw"); + repo.addTarget("target1", "test-hw", "test-serial"); repo.signTargets(); - repo.addTarget("target2", "test-hw", "test-serial", ""); + repo.addTarget("target2", "test-hw", "test-serial"); Json::Value targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "staging/targets.json"); EXPECT_EQ(targets["targets"].size(), 2); @@ -475,6 +525,158 @@ TEST(uptane_generator, oldtargets) { check_repo(temp_dir); } +/* + * Generate campaigns json in metadata dir. 
+ */ +TEST(uptane_generator, generateCampaigns) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.generateCampaigns(); + Json::Value campaigns = Utils::parseJSONFile(temp_dir.Path() / "campaigns.json"); + + EXPECT_EQ(campaigns["campaigns"][0]["name"], "campaign1"); + EXPECT_EQ(campaigns["campaigns"][0]["id"], "c2eb7e8d-8aa0-429d-883f-5ed8fdb2a493"); + EXPECT_EQ((campaigns["campaigns"][0]["size"]).asInt64(), 62470); + EXPECT_EQ(campaigns["campaigns"][0]["autoAccept"], true); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][0]["type"], "DESCRIPTION"); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][0]["value"], "this is my message to show on the device"); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][1]["type"], "ESTIMATED_INSTALLATION_DURATION"); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][1]["value"], "10"); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][2]["type"], "ESTIMATED_PREPARATION_DURATION"); + EXPECT_EQ(campaigns["campaigns"][0]["metadata"][2]["value"], "20"); +} + +void checkVersions(const TemporaryDirectory &temp_dir, const int droot_ver, const int dtime_ver, const int dsnap_ver, + const int dtargets_ver, const int iroot_ver, const int itime_ver, const int isnap_ver, + const int itargets_ver) { + const Json::Value director_root = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "root.json"); + EXPECT_EQ(director_root["signed"]["version"].asUInt(), droot_ver); + const Json::Value director_timestamp = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "timestamp.json"); + EXPECT_EQ(director_timestamp["signed"]["version"].asUInt(), dtime_ver); + const Json::Value director_snapshot = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "snapshot.json"); + EXPECT_EQ(director_snapshot["signed"]["version"].asUInt(), dsnap_ver); + const Json::Value director_targets = Utils::parseJSONFile(temp_dir.Path() / DirectorRepo::dir / "targets.json"); + 
EXPECT_EQ(director_targets["signed"]["version"].asUInt(), dtargets_ver); + + const Json::Value image_root = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "root.json"); + EXPECT_EQ(image_root["signed"]["version"].asUInt(), iroot_ver); + const Json::Value image_timestamp = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "timestamp.json"); + EXPECT_EQ(image_timestamp["signed"]["version"].asUInt(), itime_ver); + const Json::Value image_snapshot = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "snapshot.json"); + EXPECT_EQ(image_snapshot["signed"]["version"].asUInt(), isnap_ver); + const Json::Value image_targets = Utils::parseJSONFile(temp_dir.Path() / ImageRepo::dir / "targets.json"); + EXPECT_EQ(image_targets["signed"]["version"].asUInt(), itargets_ver); +} + +/* + * Bump the version of the Director Root metadata. + */ +TEST(uptane_generator, refreshDirectorRoot) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.refresh(Uptane::RepositoryType::Director(), Uptane::Role::Root()); + + checkVersions(temp_dir, 2, 2, 2, 1, 1, 1, 1, 1); + check_repo(temp_dir); +} + +/* + * Bump the version of the Director Targets metadata. + */ +TEST(uptane_generator, refreshDirectorTargets) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.refresh(Uptane::RepositoryType::Director(), Uptane::Role::Targets()); + + checkVersions(temp_dir, 1, 2, 2, 2, 1, 1, 1, 1); + check_repo(temp_dir); +} + +/* + * Bump the version of the Image repo Root metadata. + */ +TEST(uptane_generator, refreshImageRoot) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.refresh(Uptane::RepositoryType::Image(), Uptane::Role::Root()); + + checkVersions(temp_dir, 1, 1, 1, 1, 2, 2, 2, 1); + check_repo(temp_dir); +} + +/* + * Bump the version of the Image repo Targets metadata. 
+ */ +TEST(uptane_generator, refreshImageTargets) { + TemporaryDirectory temp_dir; + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.refresh(Uptane::RepositoryType::Image(), Uptane::Role::Targets()); + + checkVersions(temp_dir, 1, 1, 1, 1, 1, 2, 2, 2); + check_repo(temp_dir); +} + +void test_rotation(const Uptane::RepositoryType repo_type) { + TemporaryDirectory temp_dir; + + boost::filesystem::path repo_dir; + if (repo_type == Uptane::RepositoryType::Director()) { + repo_dir = temp_dir.Path() / DirectorRepo::dir; + } else { + repo_dir = temp_dir.Path() / ImageRepo::dir; + } + + UptaneRepo repo(temp_dir.Path(), "", ""); + repo.generateRepo(key_type); + repo.rotate(repo_type, Uptane::Role::Root(), key_type); + + const auto root1_raw = Utils::readFile(repo_dir / "1.root.json"); + const auto root2_raw = Utils::readFile(repo_dir / "2.root.json"); + auto root = Uptane::Root(repo_type, Utils::parseJSON(root1_raw)); // initialization and format check + root = Uptane::Root(repo_type, Utils::parseJSON(root1_raw), + root); // signature verification against itself + root = Uptane::Root(repo_type, Utils::parseJSON(root2_raw), root); // verify new Root + + if (repo_type == Uptane::RepositoryType::Director()) { + checkVersions(temp_dir, 2, 2, 2, 1, 1, 1, 1, 1); + } else { + checkVersions(temp_dir, 1, 1, 1, 1, 2, 2, 2, 1); + } + check_repo(temp_dir); + + repo.rotate(repo_type, Uptane::Role::Root(), key_type); + const auto root3_raw = Utils::readFile(repo_dir / "3.root.json"); + root = Uptane::Root(repo_type, Utils::parseJSON(root3_raw), root); // verify new Root + + if (repo_type == Uptane::RepositoryType::Director()) { + checkVersions(temp_dir, 3, 3, 3, 1, 1, 1, 1, 1); + } else { + checkVersions(temp_dir, 1, 1, 1, 1, 3, 3, 3, 1); + } + check_repo(temp_dir); + + // Skip v2 and thus expect failure when verifying v3. 
+ root = Uptane::Root(repo_type, Utils::parseJSON(root1_raw)); // initialization and format check + root = Uptane::Root(repo_type, Utils::parseJSON(root1_raw), + root); // signature verification against itself + EXPECT_THROW(Uptane::Root(repo_type, Utils::parseJSON(root3_raw), root), Uptane::UnmetThreshold); +} + +/* + * Rotate the Director Root. + */ +TEST(uptane_generator, rotateDirectorRoot) { test_rotation(Uptane::RepositoryType::Director()); } + +/* + * Rotate the Image repo Root. + */ +TEST(uptane_generator, rotateImageRoot) { test_rotation(Uptane::RepositoryType::Image()); } + #ifndef __NO_MAIN__ int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/src/uptane_generator/run/create_repo.sh b/src/uptane_generator/run/create_repo.sh index 635824094f..23de9468fc 100755 --- a/src/uptane_generator/run/create_repo.sh +++ b/src/uptane_generator/run/create_repo.sh @@ -1,14 +1,14 @@ #!/bin/bash -repo_dir=$(realpath ${1}) +repo_dir=$(realpath "${1}") host_addr=${2} ip_port=9000 gen_certs () { certs_dir=${repo_dir}/certs - mkdir -p ${certs_dir}/client - mkdir -p ${certs_dir}/server - cat <${certs_dir}/ca.cnf + mkdir -p "${certs_dir}"/client + mkdir -p "${certs_dir}"/server + cat <"${certs_dir}"/ca.cnf [req] req_extensions = cacert distinguished_name = req_distinguished_name @@ -20,45 +20,45 @@ basicConstraints = critical,CA:true keyUsage = keyCertSign EOF # CA certificate for client devices - openssl genrsa -out ${certs_dir}/client/ca.private.pem 4096 - openssl req -key ${certs_dir}/client/ca.private.pem -new -x509 -days 7300 -out ${certs_dir}/client/cacert.pem -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=uptane-generator-client-ca" -batch -config ${certs_dir}/ca.cnf -extensions cacert + openssl genrsa -out "${certs_dir}"/client/ca.private.pem 4096 + openssl req -key "${certs_dir}"/client/ca.private.pem -new -x509 -days 7300 -out "${certs_dir}"/client/cacert.pem -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen 
e.V/commonName=uptane-generator-client-ca" -batch -config "${certs_dir}"/ca.cnf -extensions cacert # certificate for stand-alone device - openssl req -out ${certs_dir}/client/standalone_device_cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=sota_device" -batch -new -newkey rsa:2048 -nodes -keyout ${certs_dir}/client/standalone_device_key.pem - openssl x509 -req -in ${certs_dir}/client/standalone_device_cert.csr -CA ${certs_dir}/client/cacert.pem -CAkey ${certs_dir}/client/ca.private.pem -CAcreateserial -out ${certs_dir}/client/standalone_device_cert.pem + openssl req -out "${certs_dir}"/client/standalone_device_cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=sota_device" -batch -new -newkey rsa:2048 -nodes -keyout "${certs_dir}"/client/standalone_device_key.pem + openssl x509 -req -in "${certs_dir}"/client/standalone_device_cert.csr -CA "${certs_dir}"/client/cacert.pem -CAkey "${certs_dir}"/client/ca.private.pem -CAcreateserial -out "${certs_dir}"/client/standalone_device_cert.pem # CA certificate for server - openssl genrsa -out ${certs_dir}/server/ca.private.pem 4096 - openssl req -key ${certs_dir}/server/ca.private.pem -new -x509 -days 7300 -out ${certs_dir}/server/cacert.pem -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=uptane-generator-server-ca" -batch -config ${certs_dir}/ca.cnf -extensions cacert + openssl genrsa -out "${certs_dir}"/server/ca.private.pem 4096 + openssl req -key "${certs_dir}"/server/ca.private.pem -new -x509 -days 7300 -out "${certs_dir}"/server/cacert.pem -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=uptane-generator-server-ca" -batch -config "${certs_dir}"/ca.cnf -extensions cacert # server's certificate - openssl req -out ${certs_dir}/server/cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=${host_addr}" -batch -new -newkey rsa:2048 -nodes -keyout ${certs_dir}/server/private.pem - openssl x509 -req -in ${certs_dir}/server/cert.csr -CA 
${certs_dir}/server/cacert.pem -CAkey ${certs_dir}/server/ca.private.pem -CAcreateserial -out ${certs_dir}/server/cert.pem + openssl req -out "${certs_dir}"/server/cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=${host_addr}" -batch -new -newkey rsa:2048 -nodes -keyout "${certs_dir}"/server/private.pem + openssl x509 -req -in "${certs_dir}"/server/cert.csr -CA "${certs_dir}"/server/cacert.pem -CAkey "${certs_dir}"/server/ca.private.pem -CAcreateserial -out "${certs_dir}"/server/cert.pem # bootstrap credentials for device registration. Will not be probably ever used, just to make boostrap process happy - openssl req -out ${certs_dir}/server/device_bootstrap_cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=device_registation" -batch -new -newkey rsa:2048 -nodes -keyout ${certs_dir}/server/device_bootstrap_private.pem - openssl x509 -req -in ${certs_dir}/server/device_bootstrap_cert.csr -CA ${certs_dir}/server/cacert.pem -CAkey ${certs_dir}/server/ca.private.pem -CAcreateserial -out ${certs_dir}/server/device_bootstrap_cert.pem + openssl req -out "${certs_dir}"/server/device_bootstrap_cert.csr -subj "/C=DE/ST=Berlin/O=Reis und Kichererbsen e.V/commonName=device_registation" -batch -new -newkey rsa:2048 -nodes -keyout "${certs_dir}"/server/device_bootstrap_private.pem + openssl x509 -req -in "${certs_dir}"/server/device_bootstrap_cert.csr -CA "${certs_dir}"/server/cacert.pem -CAkey "${certs_dir}"/server/ca.private.pem -CAcreateserial -out "${certs_dir}"/server/device_bootstrap_cert.pem } gen_repo () { - uptane-generator --command generate --path ${repo_dir}/uptane --keytype ED25519 --expires "3021-07-04T00:00:00Z" + uptane-generator --command generate --path "${repo_dir}"/uptane --keytype ED25519 --expires "3021-07-04T00:00:00Z" } gen_ostree () { - ostree --repo=${repo_dir}/ostree init --mode=archive-z2 + ostree --repo="${repo_dir}"/ostree init --mode=archive-z2 } gen_credentials () { TEMPDIR=$(mktemp -d) - echo -n 
"https://${host_addr}:${ip_port}" >${TEMPDIR}/autoprov.url - openssl pkcs12 -export -out ${TEMPDIR}/autoprov_credentials.p12 -in ${repo_dir}/certs/server/device_bootstrap_cert.pem -inkey ${repo_dir}/certs/server/device_bootstrap_private.pem -CAfile ${repo_dir}/certs/server/cacert.pem -chain -password pass: -passin pass: + echo -n "https://${host_addr}:${ip_port}" >"${TEMPDIR}"/autoprov.url + openssl pkcs12 -export -out "${TEMPDIR}"/autoprov_credentials.p12 -in "${repo_dir}"/certs/server/device_bootstrap_cert.pem -inkey "${repo_dir}"/certs/server/device_bootstrap_private.pem -CAfile "${repo_dir}"/certs/server/cacert.pem -chain -password pass: -passin pass: CURDIR=$(pwd) - cd "$TEMPDIR" - zip ${repo_dir}/credentials.zip ./* - cd "$CURDIR" - rm -r ${TEMPDIR} + cd "$TEMPDIR" || exit + zip "${repo_dir}"/credentials.zip ./* + cd "$CURDIR" || exit + rm -r "${TEMPDIR}" } gen_site_conf () { - cat <${repo_dir}/site.conf + cat <"${repo_dir}"/site.conf OSTREE_REPO = "${repo_dir}/ostree" SOTA_PACKED_CREDENTIALS = "${repo_dir}/credentials.zip" SOTA_CLIENT_PROV = "aktualizr-device-prov" @@ -68,10 +68,10 @@ EOF } gen_local_toml () { - mkdir -p ${repo_dir}/var_sota - echo -n "https://${host_addr}:${ip_port}" >${repo_dir}/var_sota/gateway.url - chmod 744 ${repo_dir}/var_sota - cat << EOF >${repo_dir}/sota.toml + mkdir -p "${repo_dir}"/var_sota + echo -n "https://${host_addr}:${ip_port}" >"${repo_dir}"/var_sota/gateway.url + chmod 744 "${repo_dir}"/var_sota + cat << EOF >"${repo_dir}"/sota.toml [tls] server_url_path = "${repo_dir}/var_sota/gateway.url" @@ -80,7 +80,6 @@ provision_path = "${repo_dir}/credentials.zip" primary_ecu_hardware_id = "desktop" [storage] -type = "sqlite" path = "${repo_dir}/var_sota" sqldb_path = "${repo_dir}/var_sota/sql.db" @@ -94,9 +93,9 @@ tls_pkey_path = "${certs_dir}/client/standalone_device_key.pem" EOF } -if [ -e ${repo_dir} ]; then +if [ -e "${repo_dir}" ]; then echo "File or directory ${1} already exists" - exit -1 + exit 1 fi rm -rf "$repo_dir" diff 
--git a/src/uptane_generator/uptane_repo.cc b/src/uptane_generator/uptane_repo.cc index 6680ff790e..a03bb77244 100644 --- a/src/uptane_generator/uptane_repo.cc +++ b/src/uptane_generator/uptane_repo.cc @@ -9,13 +9,14 @@ void UptaneRepo::generateRepo(KeyType key_type) { director_repo_.generateRepo(key_type); image_repo_.generateRepo(key_type); } + void UptaneRepo::addTarget(const std::string &target_name, const std::string &hardware_id, - const std::string &ecu_serial, const std::string &url) { + const std::string &ecu_serial, const std::string &url, const std::string &expires) { auto target = image_repo_.getTarget(target_name); - if (target == Json::nullValue) { + if (target.empty()) { throw std::runtime_error("No such " + target_name + " target in the image repository"); } - director_repo_.addTarget(target_name, target, hardware_id, ecu_serial, url); + director_repo_.addTarget(target_name, target, hardware_id, ecu_serial, url, expires); } void UptaneRepo::addDelegation(const Uptane::Role &name, const Uptane::Role &parent_role, const std::string &path, @@ -29,16 +30,33 @@ void UptaneRepo::revokeDelegation(const Uptane::Role &name) { } void UptaneRepo::addImage(const boost::filesystem::path &image_path, const boost::filesystem::path &targetname, - const std::string &hardware_id, const std::string &url, const Delegation &delegation) { - image_repo_.addBinaryImage(image_path, targetname, hardware_id, url, delegation); + const std::string &hardware_id, const std::string &url, const int32_t custom_version, + const Delegation &delegation, const Json::Value &custom) { + image_repo_.addBinaryImage(image_path, targetname, hardware_id, url, custom_version, delegation, custom); } -void UptaneRepo::addCustomImage(const std::string &name, const Uptane::Hash &hash, uint64_t length, - const std::string &hardware_id, const std::string &url, const Delegation &delegation, - const Json::Value &custom) { - image_repo_.addCustomImage(name, hash, length, hardware_id, url, delegation, 
custom); +void UptaneRepo::addCustomImage(const std::string &name, const Hash &hash, uint64_t length, + const std::string &hardware_id, const std::string &url, const int32_t custom_version, + const Delegation &delegation, const Json::Value &custom) { + image_repo_.addCustomImage(name, hash, length, hardware_id, url, custom_version, delegation, custom); } void UptaneRepo::signTargets() { director_repo_.signTargets(); } - void UptaneRepo::emptyTargets() { director_repo_.emptyTargets(); } void UptaneRepo::oldTargets() { director_repo_.oldTargets(); } +void UptaneRepo::generateCampaigns() { director_repo_.generateCampaigns(); } + +void UptaneRepo::refresh(Uptane::RepositoryType repo_type, const Uptane::Role &role) { + if (repo_type == Uptane::RepositoryType::Director()) { + director_repo_.refresh(role); + } else if (repo_type == Uptane::RepositoryType::Image()) { + image_repo_.refresh(role); + } +} + +void UptaneRepo::rotate(Uptane::RepositoryType repo_type, const Uptane::Role &role, KeyType key_type) { + if (repo_type == Uptane::RepositoryType::Director()) { + director_repo_.rotate(role, key_type); + } else if (repo_type == Uptane::RepositoryType::Image()) { + image_repo_.rotate(role, key_type); + } +} diff --git a/src/uptane_generator/uptane_repo.h b/src/uptane_generator/uptane_repo.h index bb2a666a9d..081c540b58 100644 --- a/src/uptane_generator/uptane_repo.h +++ b/src/uptane_generator/uptane_repo.h @@ -9,18 +9,22 @@ class UptaneRepo { UptaneRepo(const boost::filesystem::path &path, const std::string &expires, const std::string &correlation_id); void generateRepo(KeyType key_type = KeyType::kRSA2048); void addTarget(const std::string &target_name, const std::string &hardware_id, const std::string &ecu_serial, - const std::string &url); + const std::string &url = "", const std::string &expires = ""); void addImage(const boost::filesystem::path &image_path, const boost::filesystem::path &targetname, - const std::string &hardware_id, const std::string &url, const 
Delegation &delegation); + const std::string &hardware_id, const std::string &url = "", int32_t custom_version = 0, + const Delegation &delegation = {}, const Json::Value &custom = {}); + void addCustomImage(const std::string &name, const Hash &hash, uint64_t length, const std::string &hardware_id, + const std::string &url = "", int32_t custom_version = 0, const Delegation &delegation = {}, + const Json::Value &custom = {}); void addDelegation(const Uptane::Role &name, const Uptane::Role &parent_role, const std::string &path, bool terminating, KeyType key_type); void revokeDelegation(const Uptane::Role &name); - void addCustomImage(const std::string &name, const Uptane::Hash &hash, uint64_t length, - const std::string &hardware_id, const std::string &url, const Delegation &delegation = {}, - const Json::Value &custom = {}); void signTargets(); void emptyTargets(); void oldTargets(); + void generateCampaigns(); + void refresh(Uptane::RepositoryType repo_type, const Uptane::Role &role); + void rotate(Uptane::RepositoryType repo_type, const Uptane::Role &role, KeyType key_type = KeyType::kRSA2048); private: DirectorRepo director_repo_; diff --git a/src/virtual_secondary/CMakeLists.txt b/src/virtual_secondary/CMakeLists.txt index 2017fe7547..7d75b4ad37 100644 --- a/src/virtual_secondary/CMakeLists.txt +++ b/src/virtual_secondary/CMakeLists.txt @@ -1,6 +1,6 @@ -set(SOURCES managedsecondary.cc virtualsecondary.cc partialverificationsecondary.cc) +set(SOURCES managedsecondary.cc virtualsecondary.cc) -set(HEADERS managedsecondary.h virtualsecondary.h partialverificationsecondary.h) +set(HEADERS managedsecondary.h virtualsecondary.h) set(TARGET virtual_secondary) diff --git a/src/virtual_secondary/managedsecondary.cc b/src/virtual_secondary/managedsecondary.cc index b25c27bd53..e8e858d09d 100644 --- a/src/virtual_secondary/managedsecondary.cc +++ b/src/virtual_secondary/managedsecondary.cc @@ -1,7 +1,6 @@ #include "managedsecondary.h" #include -#include #include #include @@ 
-9,197 +8,183 @@ #include "crypto/crypto.h" #include "logging/logging.h" - -#include +#include "storage/invstorage.h" +#include "uptane/directorrepository.h" +#include "uptane/imagerepository.h" +#include "uptane/manifest.h" +#include "uptane/tuf.h" +#include "utilities/utils.h" namespace Primary { ManagedSecondary::ManagedSecondary(Primary::ManagedSecondaryConfig sconfig_in) : sconfig(std::move(sconfig_in)) { - // TODO: FIX - // loadMetadata(meta_pack); - std::string public_key_string; - - if (!loadKeys(&public_key_string, &private_key)) { - if (!Crypto::generateKeyPair(sconfig.key_type, &public_key_string, &private_key)) { - LOG_ERROR << "Could not generate rsa keys for secondary " << ManagedSecondary::getSerial() << "@" - << sconfig.ecu_hardware_id; - throw std::runtime_error("Unable to generate secondary rsa keys"); - } - - // do not store keys yet, wait until SotaUptaneClient performed device initialization - } - public_key_ = PublicKey(public_key_string, sconfig.key_type); - Initialize(); -} - -void ManagedSecondary::Initialize() { - struct stat st {}; - + struct stat stat_buf {}; if (!boost::filesystem::is_directory(sconfig.metadata_path)) { Utils::createDirectories(sconfig.metadata_path, S_IRWXU); } - if (stat(sconfig.metadata_path.c_str(), &st) < 0) { + if (stat(sconfig.metadata_path.c_str(), &stat_buf) < 0) { throw std::runtime_error(std::string("Could not check metadata directory permissions: ") + std::strerror(errno)); } - if ((st.st_mode & (S_IWGRP | S_IWOTH)) != 0) { + if ((stat_buf.st_mode & (S_IWGRP | S_IWOTH)) != 0) { throw std::runtime_error("Secondary metadata directory has unsafe permissions"); } if (!boost::filesystem::is_directory(sconfig.full_client_dir)) { Utils::createDirectories(sconfig.full_client_dir, S_IRWXU); } - if (stat(sconfig.full_client_dir.c_str(), &st) < 0) { - throw std::runtime_error(std::string("Could not check client directory permissions: ") + std::strerror(errno)); + if (stat(sconfig.full_client_dir.c_str(), &stat_buf) < 
0) { + throw std::runtime_error(std::string("Could not check Secondary storage directory permissions: ") + + std::strerror(errno)); } - if ((st.st_mode & (S_IWGRP | S_IWOTH)) != 0) { - throw std::runtime_error("Secondary client directory has unsafe permissions"); + if ((stat_buf.st_mode & (S_IWGRP | S_IWOTH)) != 0) { + throw std::runtime_error("Secondary storage directory has unsafe permissions"); } - storeKeys(public_key_.Value(), private_key); -} + std::string public_key_string; + bool did_load_keys = loadKeys(&public_key_string, &private_key); + if (!did_load_keys) { + bool generated_keys_ok = Crypto::generateKeyPair(sconfig.key_type, &public_key_string, &private_key); + if (!generated_keys_ok) { + LOG_ERROR << "Could not generate RSA keys for secondary " << ManagedSecondary::getSerial() << "@" + << sconfig.ecu_hardware_id; + throw std::runtime_error("Unable to generate secondary RSA keys"); + } + storeKeys(public_key_string, private_key); + } + public_key_ = PublicKey(public_key_string, sconfig.key_type); -void ManagedSecondary::rawToMeta() { - // raw meta is trusted - current_meta.director_root = - Uptane::Root(Uptane::RepositoryType::Director(), Utils::parseJSON(current_raw_meta.director_root)); - current_meta.director_targets = Uptane::Targets(Utils::parseJSON(current_raw_meta.director_targets)); - current_meta.image_root = - Uptane::Root(Uptane::RepositoryType::Image(), Utils::parseJSON(current_raw_meta.image_root)); - current_meta.image_targets = Uptane::Targets(Utils::parseJSON(current_raw_meta.image_targets)); - current_meta.image_timestamp = Uptane::TimestampMeta(Utils::parseJSON(current_raw_meta.image_timestamp)); - current_meta.image_snapshot = Uptane::Snapshot(Utils::parseJSON(current_raw_meta.image_snapshot)); -} + storage_config_.path = sconfig.full_client_dir; + storage_ = INvStorage::newStorage(storage_config_); -bool ManagedSecondary::putMetadata(const Uptane::RawMetaPack &meta_pack) { - // No verification is currently performed, we can add 
verification in future for testing purposes - detected_attack = ""; + director_repo_ = std_::make_unique(); + image_repo_ = std_::make_unique(); - current_raw_meta = meta_pack; - rawToMeta(); // current_raw_meta -> current_meta - if (!current_meta.isConsistent()) { - return false; - } - storeMetadata(current_raw_meta); - - expected_target_name = ""; - expected_target_hashes.clear(); - expected_target_length = 0; - - bool target_found = false; - - std::vector::const_iterator it; - for (it = current_meta.director_targets.targets.begin(); it != current_meta.director_targets.targets.end(); ++it) { - // TODO: what about hardware ID? Also missing in Uptane::Target - if (it->ecus().find(getSerial()) != it->ecus().end()) { - if (target_found) { - detected_attack = "Duplicate entry for this ECU"; - break; - } - expected_target_name = it->filename(); - expected_target_hashes = it->hashes(); - expected_target_length = it->length(); - target_found = true; - } + try { + director_repo_->checkMetaOffline(*storage_); + image_repo_->checkMetaOffline(*storage_); + } catch (const std::exception &e) { + LOG_INFO << "No valid metadata found in storage."; } +} - if (!target_found) { - detected_attack = "No update for this ECU"; - } +ManagedSecondary::~ManagedSecondary() {} // NOLINT(modernize-use-equals-default, hicpp-use-equals-default) - return true; +data::InstallationResult ManagedSecondary::putMetadata(const Uptane::Target &target) { + detected_attack = ""; + + Uptane::MetaBundle bundle; + if (!secondary_provider_->getMetadata(&bundle, target)) { + return data::InstallationResult(data::ResultCode::Numeric::kInternalError, + "Unable to load stored metadata from Primary"); + } + Uptane::SecondaryMetadata metadata(bundle); + + // 2. Download and check the Root metadata file from the Director repository. + // 3. NOT SUPPORTED: Download and check the Timestamp metadata file from the Director repository. + // 4. 
NOT SUPPORTED: Download and check the Snapshot metadata file from the Director repository. + // 5. Download and check the Targets metadata file from the Director repository. + try { + director_repo_->updateMeta(*storage_, metadata); + } catch (const std::exception &e) { + detected_attack = std::string("Failed to update Director metadata: ") + e.what(); + LOG_ERROR << detected_attack; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, detected_attack); + } + + // 6. Download and check the Root metadata file from the Image repository. + // 7. Download and check the Timestamp metadata file from the Image repository. + // 8. Download and check the Snapshot metadata file from the Image repository. + // 9. Download and check the top-level Targets metadata file from the Image repository. + try { + image_repo_->updateMeta(*storage_, metadata); + } catch (const std::exception &e) { + detected_attack = std::string("Failed to update Image repo metadata: ") + e.what(); + LOG_ERROR << detected_attack; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, detected_attack); + } + + // 10. Verify that Targets metadata from the Director and Image repositories match. 
+ if (!director_repo_->matchTargetsWithImageTargets(image_repo_->getTargets())) { + detected_attack = "Targets metadata from the Director and Image repositories do not match"; + LOG_ERROR << detected_attack; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, detected_attack); + } + + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); } -int ManagedSecondary::getRootVersion(const bool director) { +int ManagedSecondary::getRootVersion(const bool director) const { if (director) { - return current_meta.director_root.version(); + return director_repo_->rootVersion(); } - return current_meta.image_root.version(); + return image_repo_->rootVersion(); } -bool ManagedSecondary::putRoot(const std::string &root, const bool director) { - Uptane::Root &prev_root = (director) ? current_meta.director_root : current_meta.image_root; - std::string &prev_raw_root = (director) ? current_raw_meta.director_root : current_raw_meta.image_root; - Uptane::Root new_root = Uptane::Root( - (director) ? Uptane::RepositoryType::Director() : Uptane::RepositoryType::Image(), Utils::parseJSON(root)); +data::InstallationResult ManagedSecondary::putRoot(const std::string &root, const bool director) { + const Uptane::RepositoryType repo_type = + (director) ? 
Uptane::RepositoryType::Director() : Uptane::RepositoryType::Image(); + const int prev_version = getRootVersion(director); - // No verification is currently performed, we can add verification in future for testing purposes - if (new_root.version() == prev_root.version() + 1) { - prev_root = new_root; - prev_raw_root = root; + LOG_DEBUG << "Updating " << repo_type << " Root with current version " << std::to_string(prev_version) << ": " + << root; + + if (director) { + try { + director_repo_->verifyRoot(root); + } catch (const std::exception &e) { + detected_attack = "Failed to update Director Root from version " + std::to_string(prev_version) + ": " + e.what(); + LOG_ERROR << detected_attack; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, detected_attack); + } + storage_->storeRoot(root, repo_type, Uptane::Version(director_repo_->rootVersion())); + storage_->clearNonRootMeta(repo_type); } else { - detected_attack = "Tried to update root version " + std::to_string(prev_root.version()) + " with version " + - std::to_string(new_root.version()); + try { + image_repo_->verifyRoot(root); + } catch (const std::exception &e) { + detected_attack = "Failed to update Image Root from version " + std::to_string(prev_version) + ": " + e.what(); + LOG_ERROR << detected_attack; + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, detected_attack); + } + storage_->storeRoot(root, repo_type, Uptane::Version(image_repo_->rootVersion())); + storage_->clearNonRootMeta(repo_type); } - if (!current_meta.isConsistent()) { - return false; - } - storeMetadata(current_raw_meta); - return true; + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); } -bool ManagedSecondary::sendFirmware(const std::shared_ptr &data) { - std::lock_guard l(install_mutex); - - if (expected_target_name.empty()) { - return false; - } - if (!detected_attack.empty()) { - return false; - } +data::InstallationResult 
ManagedSecondary::sendFirmware(const Uptane::Target &target) { + (void)target; + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); +} - if (data->size() > static_cast(expected_target_length)) { - detected_attack = "overflow"; - return false; - } +data::InstallationResult ManagedSecondary::install(const Uptane::Target &target) { + // TODO: check that the target is actually valid. + auto str = secondary_provider_->getTargetFileHandle(target); + std::ofstream out_file(sconfig.firmware_path.string(), std::ios::binary); + out_file << str.rdbuf(); + str.close(); + out_file.close(); - std::vector::const_iterator it; - for (it = expected_target_hashes.begin(); it != expected_target_hashes.end(); it++) { - if (it->TypeString() == "sha256") { - if (boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(*data))) != - boost::algorithm::to_lower_copy(it->HashString())) { - detected_attack = "wrong_hash"; - return false; - } - } else if (it->TypeString() == "sha512") { - if (boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha512digest(*data))) != - boost::algorithm::to_lower_copy(it->HashString())) { - detected_attack = "wrong_hash"; - return false; - } - } - } - detected_attack = ""; - const bool result = storeFirmware(expected_target_name, *data); - return result; + Utils::writeFile(sconfig.target_name_path, target.filename()); + return data::InstallationResult(data::ResultCode::Numeric::kOk, ""); } -Json::Value ManagedSecondary::getManifest() { - std::string hash; - std::string targetname; - size_t target_len; - if (!getFirmwareInfo(&targetname, target_len, &hash)) { - return Json::nullValue; +Uptane::Manifest ManagedSecondary::getManifest() const { + Uptane::InstalledImageInfo firmware_info; + if (!getFirmwareInfo(firmware_info)) { + return Json::Value(Json::nullValue); } - Json::Value manifest; - - // package manager will generate this part in future - Json::Value installed_image; - installed_image["filepath"] = 
targetname; - - installed_image["fileinfo"]["hashes"]["sha256"] = hash; - installed_image["fileinfo"]["length"] = static_cast(target_len); - + Json::Value manifest = Uptane::ManifestIssuer::assembleManifest(firmware_info, getSerial()); + // consider updating Uptane::ManifestIssuer functionality to fulfill the given use-case + // and removing the following code from here so we encapsulate manifest generation + // and signing functionality in one place manifest["attacks_detected"] = detected_attack; - manifest["installed_image"] = installed_image; - manifest["ecu_serial"] = getSerial().ToString(); - manifest["previous_timeserver_time"] = "1970-01-01T00:00:00Z"; - manifest["timeserver_time"] = "1970-01-01T00:00:00Z"; Json::Value signed_ecu_version; - std::string b64sig = Utils::toBase64(Crypto::RSAPSSSign(nullptr, private_key, Json::FastWriter().write(manifest))); + std::string b64sig = Utils::toBase64(Crypto::RSAPSSSign(nullptr, private_key, Utils::jsonToCanonicalStr(manifest))); Json::Value signature; signature["method"] = "rsassa-pss"; signature["sig"] = b64sig; @@ -212,9 +197,27 @@ Json::Value ManagedSecondary::getManifest() { return signed_ecu_version; } +bool ManagedSecondary::getFirmwareInfo(Uptane::InstalledImageInfo &firmware_info) const { + std::string content; + + if (!boost::filesystem::exists(sconfig.target_name_path) || !boost::filesystem::exists(sconfig.firmware_path)) { + firmware_info.name = std::string("noimage"); + content = ""; + } else { + firmware_info.name = Utils::readFile(sconfig.target_name_path.string()); + content = Utils::readFile(sconfig.firmware_path.string()); + } + firmware_info.hash = Uptane::ManifestIssuer::generateVersionHashStr(content); + firmware_info.len = content.size(); + + return true; +} + void ManagedSecondary::storeKeys(const std::string &pub_key, const std::string &priv_key) { Utils::writeFile((sconfig.full_client_dir / sconfig.ecu_private_key), priv_key); Utils::writeFile((sconfig.full_client_dir / 
sconfig.ecu_public_key), pub_key); + sync(); + did_store_keys++; // For testing } bool ManagedSecondary::loadKeys(std::string *pub_key, std::string *priv_key) { diff --git a/src/virtual_secondary/managedsecondary.h b/src/virtual_secondary/managedsecondary.h index 2e24b610f4..e98b621fd2 100644 --- a/src/virtual_secondary/managedsecondary.h +++ b/src/virtual_secondary/managedsecondary.h @@ -5,23 +5,27 @@ #include #include -#include +#include #include "json/json.h" +#include "libaktualizr/secondaryinterface.h" +#include "libaktualizr/types.h" #include "primary/secondary_config.h" -#include "uptane/secondaryinterface.h" -#include "utilities/types.h" +#include "uptane/secondary_metadata.h" + +namespace Uptane { +class DirectorRepository; +class ImageRepository; +} // namespace Uptane + +class INvStorage; namespace Primary { class ManagedSecondaryConfig : public SecondaryConfig { public: - ManagedSecondaryConfig(const char* type = Type) : SecondaryConfig(type) {} + explicit ManagedSecondaryConfig(const std::string& type = "managed") : SecondaryConfig(type) {} - public: - constexpr static const char* const Type = "managed"; - - public: bool partial_verifying{false}; std::string ecu_serial; std::string ecu_hardware_id; @@ -34,59 +38,62 @@ class ManagedSecondaryConfig : public SecondaryConfig { KeyType key_type{KeyType::kRSA2048}; }; -// Managed secondary is an abstraction over virtual and other types of legacy -// (non-UPTANE) secondaries. They require all the UPTANE-related functionality -// to be implemented in aktualizr itself, so there's some shared code. - -class ManagedSecondary : public Uptane::SecondaryInterface { +// ManagedSecondary is an abstraction over virtual and other types of legacy +// (non-Uptane) Secondaries. They require any the Uptane-related functionality +// to be implemented in aktualizr itself. 
+class ManagedSecondary : public SecondaryInterface { public: explicit ManagedSecondary(Primary::ManagedSecondaryConfig sconfig_in); - ~ManagedSecondary() override = default; + // Prevent inlining to enable forward declarations. + ~ManagedSecondary() override; + ManagedSecondary(const ManagedSecondary&) = delete; + ManagedSecondary& operator=(const ManagedSecondary&) = delete; - void Initialize(); + void init(std::shared_ptr secondary_provider_in) override { + secondary_provider_ = std::move(secondary_provider_in); + } - Uptane::EcuSerial getSerial() override { + Uptane::EcuSerial getSerial() const override { if (!sconfig.ecu_serial.empty()) { return Uptane::EcuSerial(sconfig.ecu_serial); } return Uptane::EcuSerial(public_key_.KeyId()); } - Uptane::HardwareIdentifier getHwId() override { return Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } - PublicKey getPublicKey() override { return public_key_; } - bool putMetadata(const Uptane::RawMetaPack& meta_pack) override; - int getRootVersion(bool director) override; - bool putRoot(const std::string& root, bool director) override; + Uptane::HardwareIdentifier getHwId() const override { return Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } + PublicKey getPublicKey() const override { return public_key_; } + data::InstallationResult putMetadata(const Uptane::Target& target) override; + int getRootVersion(bool director) const override; + data::InstallationResult putRoot(const std::string& root, bool director) override; - bool sendFirmware(const std::shared_ptr& data) override; - Json::Value getManifest() override; + data::InstallationResult sendFirmware(const Uptane::Target& target) override; + data::InstallationResult install(const Uptane::Target& target) override; + Uptane::Manifest getManifest() const override; + + // Public for testing only bool loadKeys(std::string* pub_key, std::string* priv_key); + int storeKeysCount() const { return did_store_keys; } protected: - Primary::ManagedSecondaryConfig 
sconfig; + ManagedSecondary(ManagedSecondary&&) = default; + ManagedSecondary& operator=(ManagedSecondary&&) = default; - private: - PublicKey public_key_; - std::string private_key; + virtual bool getFirmwareInfo(Uptane::InstalledImageInfo& firmware_info) const; + std::shared_ptr secondary_provider_; + Primary::ManagedSecondaryConfig sconfig; std::string detected_attack; - std::string expected_target_name; - std::vector expected_target_hashes; - uint64_t expected_target_length{}; - - Uptane::MetaPack current_meta; - Uptane::RawMetaPack current_raw_meta; - std::mutex install_mutex; - - virtual bool storeFirmware(const std::string& target_name, const std::string& content) = 0; - virtual bool getFirmwareInfo(std::string* target_name, size_t& target_len, std::string* sha256hash) = 0; + private: void storeKeys(const std::string& pub_key, const std::string& priv_key); - void rawToMeta(); - // TODO: implement - void storeMetadata(const Uptane::RawMetaPack& meta_pack) { (void)meta_pack; } - bool loadMetadata(Uptane::RawMetaPack* meta_pack); + int did_store_keys{0}; // For testing + std::unique_ptr director_repo_; + std::unique_ptr image_repo_; + PublicKey public_key_; + std::string private_key; + StorageConfig storage_config_; + std::shared_ptr storage_; }; } // namespace Primary diff --git a/src/virtual_secondary/partialverificationsecondary.cc b/src/virtual_secondary/partialverificationsecondary.cc deleted file mode 100644 index 17e0b4da39..0000000000 --- a/src/virtual_secondary/partialverificationsecondary.cc +++ /dev/null @@ -1,102 +0,0 @@ -#include "partialverificationsecondary.h" - -#include -#include - -#include -#include "json/json.h" - -#include "logging/logging.h" -#include "uptane/secondaryinterface.h" -#include "utilities/exceptions.h" -#include "utilities/types.h" - -namespace Uptane { - -PartialVerificationSecondary::PartialVerificationSecondary(Primary::PartialVerificationSecondaryConfig sconfig_in) - : sconfig(std::move(sconfig_in)), 
root_(Root::Policy::kAcceptAll) { - boost::filesystem::create_directories(sconfig.metadata_path); - - // FIXME Probably we need to generate keys on the secondary - std::string public_key_string; - if (!loadKeys(&public_key_string, &private_key_)) { - if (!Crypto::generateKeyPair(sconfig.key_type, &public_key_string, &private_key_)) { - LOG_ERROR << "Could not generate keys for secondary " << PartialVerificationSecondary::getSerial() << "@" - << sconfig.ecu_hardware_id; - throw std::runtime_error("Unable to generate secondary keys"); - } - storeKeys(public_key_string, private_key_); - } - public_key_ = PublicKey(public_key_string, sconfig.key_type); -} - -bool PartialVerificationSecondary::putMetadata(const RawMetaPack &meta) { - TimeStamp now(TimeStamp::Now()); - detected_attack_.clear(); - - // TODO: check for expiration and version downgrade - root_ = Uptane::Root(RepositoryType::Director(), Utils::parseJSON(meta.director_root), root_); - Uptane::Targets targets(RepositoryType::Director(), Role::Targets(), Utils::parseJSON(meta.director_targets), - std::make_shared(root_)); - if (meta_targets_.version() > targets.version()) { - detected_attack_ = "Rollback attack detected"; - return true; - } - meta_targets_ = targets; - std::vector::const_iterator it; - bool target_found = false; - for (it = meta_targets_.targets.begin(); it != meta_targets_.targets.end(); ++it) { - if (it->IsForSecondary(getSerial())) { - if (target_found) { - detected_attack_ = "Duplicate entry for this ECU"; - break; - } - target_found = true; - } - } - return true; -} - -Json::Value PartialVerificationSecondary::getManifest() { - throw NotImplementedException(); - return Json::Value(); -} - -int PartialVerificationSecondary::getRootVersion(bool director) { - (void)director; - throw NotImplementedException(); - return 0; -} - -bool PartialVerificationSecondary::putRoot(const std::string &root, bool director) { - (void)root; - (void)director; - - throw NotImplementedException(); - return 
false; -} - -bool PartialVerificationSecondary::sendFirmware(const std::shared_ptr &data) { - (void)data; - throw NotImplementedException(); - return false; -} - -void PartialVerificationSecondary::storeKeys(const std::string &public_key, const std::string &private_key) { - Utils::writeFile((sconfig.full_client_dir / sconfig.ecu_private_key), private_key); - Utils::writeFile((sconfig.full_client_dir / sconfig.ecu_public_key), public_key); -} - -bool PartialVerificationSecondary::loadKeys(std::string *public_key, std::string *private_key) { - boost::filesystem::path public_key_path = sconfig.full_client_dir / sconfig.ecu_public_key; - boost::filesystem::path private_key_path = sconfig.full_client_dir / sconfig.ecu_private_key; - - if (!boost::filesystem::exists(public_key_path) || !boost::filesystem::exists(private_key_path)) { - return false; - } - - *private_key = Utils::readFile(private_key_path.string()); - *public_key = Utils::readFile(public_key_path.string()); - return true; -} -} // namespace Uptane diff --git a/src/virtual_secondary/partialverificationsecondary.h b/src/virtual_secondary/partialverificationsecondary.h deleted file mode 100644 index f633435b21..0000000000 --- a/src/virtual_secondary/partialverificationsecondary.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef UPTANE_PARTIALVRIFICATIONSECONDARY_H_ -#define UPTANE_PARTIALVRIFICATIONSECONDARY_H_ - -#include -#include - -#include -#include "json/json.h" - -#include "uptane/secondaryinterface.h" -#include "utilities/types.h" - -#include "managedsecondary.h" - -namespace Primary { - -class PartialVerificationSecondaryConfig : public ManagedSecondaryConfig { - public: - PartialVerificationSecondaryConfig() : ManagedSecondaryConfig(Type) {} - - public: - constexpr static const char* const Type = "partial-verification"; -}; - -} // namespace Primary - -namespace Uptane { - -class PartialVerificationSecondary : public SecondaryInterface { - public: - explicit 
PartialVerificationSecondary(Primary::PartialVerificationSecondaryConfig sconfig_in); - - EcuSerial getSerial() override { - if (!sconfig.ecu_serial.empty()) { - return Uptane::EcuSerial(sconfig.ecu_serial); - } - return Uptane::EcuSerial(public_key_.KeyId()); - } - Uptane::HardwareIdentifier getHwId() override { return Uptane::HardwareIdentifier(sconfig.ecu_hardware_id); } - PublicKey getPublicKey() override { return public_key_; } - - bool putMetadata(const RawMetaPack& meta) override; - int getRootVersion(bool director) override; - bool putRoot(const std::string& root, bool director) override; - - bool sendFirmware(const std::shared_ptr& data) override; - Json::Value getManifest() override; - - private: - void storeKeys(const std::string& public_key, const std::string& private_key); - bool loadKeys(std::string* public_key, std::string* private_key); - - Primary::PartialVerificationSecondaryConfig sconfig; - Uptane::Root root_; - PublicKey public_key_; - std::string private_key_; - - std::string detected_attack_; - Uptane::Targets meta_targets_; -}; -} // namespace Uptane - -#endif // UPTANE_PARTIALVRIFICATIONSECONDARY_H_ diff --git a/src/virtual_secondary/virtual_secondary_test.cc b/src/virtual_secondary/virtual_secondary_test.cc index 2beb5e3e48..2547a4fa17 100644 --- a/src/virtual_secondary/virtual_secondary_test.cc +++ b/src/virtual_secondary/virtual_secondary_test.cc @@ -1,8 +1,9 @@ #include -#include "metafake.h" -#include "partialverificationsecondary.h" -#include "uptane/secondaryinterface.h" +#include "httpfake.h" +#include "libaktualizr/secondaryinterface.h" +#include "uptane_test_common.h" +#include "utilities/utils.h" #include "virtualsecondary.h" class VirtualSecondaryTest : public ::testing::Test { @@ -27,71 +28,174 @@ class VirtualSecondaryTest : public ::testing::Test { Primary::VirtualSecondaryConfig config_; }; -class PartialVerificationSecondaryTest : public ::testing::Test { - protected: - PartialVerificationSecondaryTest() { - 
config_.partial_verifying = true; - config_.full_client_dir = temp_dir_.Path(); - config_.ecu_serial = ""; - config_.ecu_hardware_id = "secondary_hardware"; - config_.ecu_private_key = "sec.priv"; - config_.ecu_public_key = "sec.pub"; - config_.firmware_path = temp_dir_.Path() / "firmware.txt"; - config_.target_name_path = temp_dir_.Path() / "firmware_name.txt"; - config_.metadata_path = temp_dir_.Path() / "metadata"; - } +/* Create a virtual secondary for testing. */ +TEST_F(VirtualSecondaryTest, Instantiation) { EXPECT_NO_THROW(Primary::VirtualSecondary virtual_sec(config_)); } - virtual void SetUp() {} - virtual void TearDown() {} +/* + * Rotate both Director and Image repo Root keys twice and make sure the Primary + * correctly sends the intermediate Roots to the Secondary. + */ +TEST(VirtualSecondary, RootRotation) { + TemporaryDirectory temp_dir; + TemporaryDirectory meta_dir; + auto http = std::make_shared(temp_dir.Path(), "", meta_dir.Path() / "repo"); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + logger_set_threshold(boost::log::trivial::trace); - protected: - TemporaryDirectory temp_dir_; - Primary::PartialVerificationSecondaryConfig config_; -}; + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); -/* Create a virtual secondary for testing. */ -TEST_F(VirtualSecondaryTest, Instantiation) { EXPECT_NO_THROW(Primary::VirtualSecondary virtual_sec(config_)); } + UptaneRepo uptane_repo{meta_dir.PathString(), "", ""}; + uptane_repo.generateRepo(KeyType::kED25519); + uptane_repo.addImage("tests/test_data/firmware.txt", "firmware.txt", "secondary_hw"); + uptane_repo.addTarget("firmware.txt", "secondary_hw", "secondary_ecu_serial"); + uptane_repo.signTargets(); -/* Partial verification secondaries generate and store public keys. 
*/ -TEST_F(PartialVerificationSecondaryTest, Uptane_get_key) { - Uptane::PartialVerificationSecondary sec1(config_); - PublicKey key1 = sec1.getPublicKey(); - Uptane::PartialVerificationSecondary sec2(config_); - PublicKey key2 = sec2.getPublicKey(); - // Verify that we store keys - EXPECT_EQ(key1, key2); + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + result::Download download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + result::Install install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); + + uptane_repo.rotate(Uptane::RepositoryType::Director(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.rotate(Uptane::RepositoryType::Director(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.emptyTargets(); + uptane_repo.addImage("tests/test_data/firmware_name.txt", "firmware_name.txt", "secondary_hw"); + uptane_repo.addTarget("firmware_name.txt", "secondary_hw", "secondary_ecu_serial"); + uptane_repo.signTargets(); + + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); + + uptane_repo.rotate(Uptane::RepositoryType::Image(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.rotate(Uptane::RepositoryType::Image(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.emptyTargets(); + uptane_repo.addImage("tests/test_data/firmware.txt", "firmware2.txt", "secondary_hw"); + uptane_repo.addTarget("firmware2.txt", "secondary_hw", "secondary_ecu_serial"); + uptane_repo.signTargets(); 
+ + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); } -/* Partial verification secondaries can verify Uptane metadata. */ -TEST_F(PartialVerificationSecondaryTest, Uptane_putMetadata_good) { - Uptane::PartialVerificationSecondary sec(config_); - Uptane::RawMetaPack metadata; +/** + * The secondary generates a key pair on first run, and re-uses it afterwards + */ +TEST_F(VirtualSecondaryTest, GeneratesPublicKey) { + auto priv_key_path = temp_dir_ / "sec.priv"; + auto pub_key_path = temp_dir_ / "sec.pub"; - TemporaryDirectory temp_dir; - MetaFake meta(temp_dir.Path()); - metadata.director_root = Utils::readFile(temp_dir / "director/root.json"); - metadata.director_targets = Utils::readFile(temp_dir / "director/targets_hasupdates.json"); - EXPECT_NO_THROW(sec.putMetadata(metadata)); + // Shouldn't exist before + EXPECT_FALSE(boost::filesystem::exists(priv_key_path)); + EXPECT_FALSE(boost::filesystem::exists(pub_key_path)); + { + Primary::VirtualSecondary first_run(config_); + // Does exist after the first run + EXPECT_TRUE(boost::filesystem::exists(priv_key_path)); + EXPECT_TRUE(boost::filesystem::exists(pub_key_path)); + EXPECT_EQ(first_run.storeKeysCount(), 1); + } + + std::string old_priv_key = Utils::readFile(priv_key_path); + std::string old_pub_key = Utils::readFile(pub_key_path); + // After a reboot... 
+ Primary::VirtualSecondary second_run(config_); + EXPECT_EQ(second_run.storeKeysCount(), 0); + + // The files still exist + EXPECT_TRUE(boost::filesystem::exists(priv_key_path)); + EXPECT_TRUE(boost::filesystem::exists(pub_key_path)); + + // And their contents are unchanged + std::string new_priv_key = Utils::readFile(priv_key_path); + std::string new_pub_key = Utils::readFile(pub_key_path); + EXPECT_EQ(old_pub_key, new_pub_key); + EXPECT_EQ(old_priv_key, new_priv_key); } -/* Partial verification secondaries reject invalid Uptane metadata. */ -TEST_F(PartialVerificationSecondaryTest, Uptane_putMetadata_bad) { - Uptane::PartialVerificationSecondary sec(config_); - Uptane::RawMetaPack metadata; +#ifdef FIU_ENABLE + +#include "utilities/fault_injection.h" +/* + * Verifies that updates fail after Root rotation verification failure reported by Secondaries. + */ +TEST(VirtualSecondary, RootRotationFailure) { TemporaryDirectory temp_dir; - MetaFake meta(temp_dir.Path()); - metadata.director_root = Utils::readFile(temp_dir / "director/root.json"); + TemporaryDirectory meta_dir; + auto http = std::make_shared(temp_dir.Path(), "", meta_dir.Path() / "repo"); + Config conf = UptaneTestCommon::makeTestConfig(temp_dir, http->tls_server); + logger_set_threshold(boost::log::trivial::trace); - Json::Value json_targets = Utils::parseJSONFile(temp_dir / "director/targets_hasupdates.json"); - json_targets["signatures"][0]["sig"] = "Wrong signature"; - metadata.director_targets = Utils::jsonToStr(json_targets); - EXPECT_THROW(sec.putMetadata(metadata), Uptane::BadKeyId); + auto storage = INvStorage::newStorage(conf.storage); + UptaneTestCommon::TestAktualizr aktualizr(conf, storage, http); + aktualizr.Initialize(); + + UptaneRepo uptane_repo{meta_dir.PathString(), "", ""}; + uptane_repo.generateRepo(KeyType::kED25519); + uptane_repo.addImage("tests/test_data/firmware.txt", "firmware.txt", "secondary_hw"); + uptane_repo.addTarget("firmware.txt", "secondary_hw", "secondary_ecu_serial"); 
+ uptane_repo.signTargets(); + + result::UpdateCheck update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + result::Download download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + result::Install install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); + + uptane_repo.rotate(Uptane::RepositoryType::Director(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.rotate(Uptane::RepositoryType::Director(), Uptane::Role::Root(), KeyType::kED25519); + uptane_repo.emptyTargets(); + uptane_repo.addImage("tests/test_data/firmware_name.txt", "firmware_name.txt", "secondary_hw"); + uptane_repo.addTarget("firmware_name.txt", "secondary_hw", "secondary_ecu_serial"); + uptane_repo.signTargets(); + + // This causes putRoot to be skipped, which means when the latest (v3) + // metadata is sent, the Secondary can't verify it, since it only has the v1 + // Root. + fault_injection_init(); + fiu_enable("secondary_putroot", 1, nullptr, 0); + + update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_EQ(install_result.dev_report.result_code, + data::ResultCode(data::ResultCode::Numeric::kVerificationFailed, "secondary_hw:VERIFICATION_FAILED")); + EXPECT_EQ(install_result.dev_report.description, "Sending metadata to one or more ECUs failed"); + + fiu_disable("secondary_putroot"); + + // Retry after disabling fault injection to verify the test. 
+ update_result = aktualizr.CheckUpdates().get(); + EXPECT_EQ(update_result.status, result::UpdateStatus::kUpdatesAvailable); + download_result = aktualizr.Download(update_result.updates).get(); + ASSERT_EQ(download_result.status, result::DownloadStatus::kSuccess); + install_result = aktualizr.Install(download_result.updates).get(); + EXPECT_TRUE(install_result.dev_report.success); } +#endif // FIU_ENABLE + #ifndef __NO_MAIN__ int main(int argc, char **argv) { ::testing::InitGoogleTest(&argc, argv); + + logger_init(); + logger_set_threshold(boost::log::trivial::trace); + return RUN_ALL_TESTS(); } #endif diff --git a/src/virtual_secondary/virtualsecondary.cc b/src/virtual_secondary/virtualsecondary.cc index c363f03a49..dc2bfd981e 100644 --- a/src/virtual_secondary/virtualsecondary.cc +++ b/src/virtual_secondary/virtualsecondary.cc @@ -1,6 +1,7 @@ +#include + #include #include -#include #include "crypto/crypto.h" #include "utilities/fault_injection.h" @@ -9,7 +10,7 @@ namespace Primary { -const char* const VirtualSecondaryConfig::Type = "virtual"; +constexpr const char* const VirtualSecondaryConfig::Type; VirtualSecondaryConfig::VirtualSecondaryConfig(const Json::Value& json_config) : ManagedSecondaryConfig(Type) { partial_verifying = json_config["partial_verifying"].asBool(); @@ -26,10 +27,8 @@ VirtualSecondaryConfig::VirtualSecondaryConfig(const Json::Value& json_config) : std::vector VirtualSecondaryConfig::create_from_file( const boost::filesystem::path& file_full_path) { Json::Value json_config; - Json::Reader reader; std::ifstream json_file(file_full_path.string()); - - reader.parse(json_file, json_config); + Json::parseFromStream(Json::CharReaderBuilder(), json_file, &json_config, nullptr); json_file.close(); std::vector sec_configs; @@ -55,42 +54,62 @@ void VirtualSecondaryConfig::dump(const boost::filesystem::path& file_full_path) json_config["metadata_path"] = metadata_path.string(); Json::Value root; + // Append to the config file if it already exists. 
+ if (boost::filesystem::exists(file_full_path)) { + root = Utils::parseJSONFile(file_full_path); + } root[Type].append(json_config); - Json::StyledStreamWriter json_writer; + Json::StreamWriterBuilder json_bwriter; + json_bwriter["indentation"] = "\t"; + std::unique_ptr const json_writer(json_bwriter.newStreamWriter()); + boost::filesystem::create_directories(file_full_path.parent_path()); std::ofstream json_file(file_full_path.string()); - json_writer.write(json_file, root); + json_writer->write(root, &json_file); json_file.close(); } VirtualSecondary::VirtualSecondary(Primary::VirtualSecondaryConfig sconfig_in) : ManagedSecondary(std::move(sconfig_in)) {} -bool VirtualSecondary::storeFirmware(const std::string& target_name, const std::string& content) { - if (fiu_fail((std::string("secondary_install_") + getSerial().ToString()).c_str()) != 0) { - return false; +data::InstallationResult VirtualSecondary::putMetadata(const Uptane::Target& target) { + if (fiu_fail("secondary_putmetadata") != 0) { + return data::InstallationResult(data::ResultCode::Numeric::kVerificationFailed, fault_injection_last_info()); + } + + return ManagedSecondary::putMetadata(target); +} + +data::InstallationResult VirtualSecondary::putRoot(const std::string& root, bool director) { + if (fiu_fail("secondary_putroot") != 0) { + return data::InstallationResult(data::ResultCode(data::ResultCode::Numeric::kOk, fault_injection_last_info()), + "Forced failure"); } - Utils::writeFile(sconfig.target_name_path, target_name); - Utils::writeFile(sconfig.firmware_path, content); - sync(); - return true; + + return ManagedSecondary::putRoot(root, director); } -bool VirtualSecondary::getFirmwareInfo(std::string* target_name, size_t& target_len, std::string* sha256hash) { - std::string content; +data::InstallationResult VirtualSecondary::sendFirmware(const Uptane::Target& target) { + if (fiu_fail((std::string("secondary_sendfirmware_") + getSerial().ToString()).c_str()) != 0) { + // Put the injected 
failure string into the ResultCode so that it shows up + // in the device's concatenated InstallationResult. + return data::InstallationResult( + data::ResultCode(data::ResultCode::Numeric::kDownloadFailed, fault_injection_last_info()), "Forced failure"); + } + + return ManagedSecondary::sendFirmware(target); +} - if (!boost::filesystem::exists(sconfig.target_name_path) || !boost::filesystem::exists(sconfig.firmware_path)) { - *target_name = std::string("noimage"); - content = ""; - } else { - *target_name = Utils::readFile(sconfig.target_name_path.string()); - content = Utils::readFile(sconfig.firmware_path.string()); +data::InstallationResult VirtualSecondary::install(const Uptane::Target& target) { + if (fiu_fail((std::string("secondary_install_") + getSerial().ToString()).c_str()) != 0) { + // Put the injected failure string into the ResultCode so that it shows up + // in the device's concatenated InstallationResult. + return data::InstallationResult( + data::ResultCode(data::ResultCode::Numeric::kInstallFailed, fault_injection_last_info()), "Forced failure"); } - *sha256hash = boost::algorithm::to_lower_copy(boost::algorithm::hex(Crypto::sha256digest(content))); - target_len = content.size(); - return true; + return ManagedSecondary::install(target); } } // namespace Primary diff --git a/src/virtual_secondary/virtualsecondary.h b/src/virtual_secondary/virtualsecondary.h index 4db18eb868..d16f29ec60 100644 --- a/src/virtual_secondary/virtualsecondary.h +++ b/src/virtual_secondary/virtualsecondary.h @@ -3,31 +3,33 @@ #include +#include "libaktualizr/types.h" #include "managedsecondary.h" -#include "utilities/types.h" namespace Primary { class VirtualSecondaryConfig : public ManagedSecondaryConfig { public: + static constexpr const char* const Type{"virtual"}; + VirtualSecondaryConfig() : ManagedSecondaryConfig(Type) {} - VirtualSecondaryConfig(const Json::Value& json_config); + explicit VirtualSecondaryConfig(const Json::Value& json_config); static std::vector 
create_from_file(const boost::filesystem::path& file_full_path); void dump(const boost::filesystem::path& file_full_path) const; - - public: - static const char* const Type; }; class VirtualSecondary : public ManagedSecondary { public: explicit VirtualSecondary(Primary::VirtualSecondaryConfig sconfig_in); - ~VirtualSecondary() override = default; - private: - bool storeFirmware(const std::string& target_name, const std::string& content) override; - bool getFirmwareInfo(std::string* target_name, size_t& target_len, std::string* sha256hash) override; + std::string Type() const override { return VirtualSecondaryConfig::Type; } + data::InstallationResult putMetadata(const Uptane::Target& target) override; + data::InstallationResult putRoot(const std::string& root, bool director) override; + data::InstallationResult sendFirmware(const Uptane::Target& target) override; + data::InstallationResult install(const Uptane::Target& target) override; + + bool ping() const override { return true; } }; } // namespace Primary diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index eee4257e0b..30a0034cad 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -16,13 +16,12 @@ add_subdirectory(${GTEST_ROOT} ${CMAKE_CURRENT_BINARY_DIR}/gtest EXCLUDE_FROM_AL add_definitions(-Wswitch-default) add_subdirectory(uptane_repo_generation) -include_directories("${PROJECT_SOURCE_DIR}/src/libaktualizr/third_party/jsoncpp") - add_dependencies(build_tests aktualizr) if(BUILD_SOTA_TOOLS) add_dependencies(build_tests garage-push) add_dependencies(build_tests garage-check) endif(BUILD_SOTA_TOOLS) +add_dependencies(build_tests aktualizr-get) add_dependencies(build_tests aktualizr-secondary) add_dependencies(build_tests aktualizr-info) add_dependencies(build_tests uptane-generator) @@ -53,8 +52,7 @@ set(TEST_SOURCES httpfake.h metafake.h test_utils.cc test_utils.h uptane_vector_ include(CMakeParseArguments) -add_library(testutilities test_utils.cc) -list(APPEND TEST_LIBS testutilities) 
+add_library(testutilities STATIC test_utils.cc) if(BUILD_OSTREE) add_library(ostree_mock SHARED ostree_mock.c) @@ -82,7 +80,7 @@ endif() add_executable(aktualizr_uptane_vector_tests uptane_vector_tests.cc) -target_link_libraries(aktualizr_uptane_vector_tests aktualizr_static_lib ${TEST_LIBS}) +target_link_libraries(aktualizr_uptane_vector_tests ${TEST_LIBS}) add_dependencies(build_tests aktualizr_uptane_vector_tests) if(TESTSUITE_VALGRIND) @@ -90,7 +88,7 @@ if(TESTSUITE_VALGRIND) endif(TESTSUITE_VALGRIND) add_test(NAME test_uptane_vectors COMMAND ${PROJECT_SOURCE_DIR}/tests/run_vector_tests.sh -s ${PROJECT_SOURCE_DIR}/tests ${VECTOR_TESTS_ARGS} -- ${GOOGLE_TEST_OUTPUT}) -set_tests_properties(test_uptane_vectors PROPERTIES LABELS "noptest uptane_vectors") +set_tests_properties(test_uptane_vectors PROPERTIES LABELS "noptest") if(SOTA_PACKED_CREDENTIALS) add_test(NAME shared_cred_prov_test COMMAND ${PROJECT_SOURCE_DIR}/tests/shared_cred_prov_test.py @@ -113,21 +111,26 @@ if(SOTA_PACKED_CREDENTIALS) ) set_tests_properties(device_cred_prov_hsm_test PROPERTIES LABELS "credentials") endif(BUILD_P11 AND TEST_PKCS11_MODULE_PATH) -endif(SOTA_PACKED_CREDENTIALS) -if(BUILD_DEB) - add_test(NAME memory_test COMMAND ${PROJECT_SOURCE_DIR}/tests/memory_usage_test.sh - ${PROJECT_BINARY_DIR} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) -endif(BUILD_DEB) + if(BUILD_SOTA_TOOLS) + # do not link this test with libaktualizr, but rather with sota_tools_lib + list(REMOVE_ITEM TEST_LIBS aktualizr_lib) + list(INSERT TEST_LIBS 0 sota_tools_lib) + add_aktualizr_test(NAME sota_tools_auth_cred_test + SOURCES authenticate_cred_test.cc + PROJECT_WORKING_DIRECTORY + ARGS ${SOTA_PACKED_CREDENTIALS}) + add_dependencies(t_sota_tools_auth_cred_test t_sota_tools_auth_test) + target_include_directories(t_sota_tools_auth_cred_test PUBLIC + ${PROJECT_SOURCE_DIR}/tests ${PROJECT_SOURCE_DIR}/src/sota_tools) + set_tests_properties(test_sota_tools_auth_cred_test PROPERTIES LABELS "credentials") + 
endif(BUILD_SOTA_TOOLS) +endif(SOTA_PACKED_CREDENTIALS) ############################################################################### # The test feature of cmake checks the return value when the program # exits. If the return value is zero, the testcase passes. -# test running the executable with command line option --help -add_test(NAME test_cmdline--help COMMAND aktualizr --help) -# test running the executable with command line option -h -add_test(NAME test_cmdline-h COMMAND aktualizr -h) # test running the executable with command line option --something add_test(NAME test_cmdline--something COMMAND aktualizr --something -c ${PROJECT_SOURCE_DIR}/tests/config/minimal.toml) @@ -192,17 +195,48 @@ set_tests_properties(test_log_negative add_test(NAME test_ip_secondary COMMAND ${PROJECT_SOURCE_DIR}/tests/ipsecondary_test.py --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) +set_tests_properties(test_ip_secondary PROPERTIES LABELS "noptest") -add_test(NAME test_backend_failure - COMMAND ${PROJECT_SOURCE_DIR}/tests/test_backend_failure.py - --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR} --ostree ${BUILD_OSTREE}) -set_tests_properties(test_backend_failure PROPERTIES LABELS "noptest") +add_test(NAME test_ip_secondary_rotation + COMMAND ${PROJECT_SOURCE_DIR}/tests/ipsecondary_rotation_test.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) +set_tests_properties(test_ip_secondary_rotation PROPERTIES LABELS "noptest") + +add_test(NAME test_director_failure + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_director_failure.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) +set_tests_properties(test_director_failure PROPERTIES LABELS "noptest") + +add_test(NAME test_imagerepo_failure + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_imagerepo_failure.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) +set_tests_properties(test_imagerepo_failure PROPERTIES LABELS "noptest") + +add_test(NAME 
test_customrepo_failure + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_customrepo_failure.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) +set_tests_properties(test_customrepo_failure PROPERTIES LABELS "noptest") if(BUILD_OSTREE) - add_test(NAME test_update - COMMAND ${PROJECT_SOURCE_DIR}/tests/test_update.py + add_test(NAME test_ip_secondary_ostree + COMMAND ${PROJECT_SOURCE_DIR}/tests/ipsecondary_ostree_test.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) + set_tests_properties(test_ip_secondary_ostree PROPERTIES LABELS "noptest") + + add_test(NAME test_treehub_failure + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_treehub_failure.py --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) - set_tests_properties(test_update PROPERTIES LABELS "noptest") + set_tests_properties(test_treehub_failure PROPERTIES LABELS "noptest") + + add_test(NAME test_misc_ostree_update + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_misc_ostree_update.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) + set_tests_properties(test_misc_ostree_update PROPERTIES LABELS "noptest") + + add_test(NAME test_ostree_custom_uri + COMMAND ${PROJECT_SOURCE_DIR}/tests/test_ostree_custom_uri.py + --build-dir ${PROJECT_BINARY_DIR} --src-dir ${PROJECT_SOURCE_DIR}) + set_tests_properties(test_ostree_custom_uri PROPERTIES LABELS "noptest") endif(BUILD_OSTREE) add_test(NAME test_aktualizr_kill @@ -214,7 +248,7 @@ add_test(NAME test_install_aktualizr_and_update COMMAND ${PROJECT_SOURCE_DIR}/te set_tests_properties(test_install_aktualizr_and_update PROPERTIES LABELS "noptest") add_executable(aktualizr-cycle-simple aktualizr_cycle_simple.cc) -target_link_libraries(aktualizr-cycle-simple aktualizr_static_lib ${AKTUALIZR_EXTERNAL_LIBS}) +target_link_libraries(aktualizr-cycle-simple aktualizr_lib) aktualizr_source_file_checks(aktualizr_cycle_simple.cc) add_dependencies(build_tests aktualizr-cycle-simple) diff --git a/tests/aktualizr.supp 
b/tests/aktualizr.supp index 51aae5c554..b1c942a9b6 100644 --- a/tests/aktualizr.supp +++ b/tests/aktualizr.supp @@ -45,12 +45,20 @@ ... } { - openssl ENGINE_load_private_key (FIXME!) + openssl ENGINE_load_private_key (TODO, see crypto.cc) Memcheck:Leak ... fun:ENGINE_load_private_key ... } +{ + libcrypto RSA_new_method + Memcheck:Leak + ... + fun:RSA_new_method + fun:_ZN6Crypto21generateRSAKeyPairEVPEi + ... +} { libp11 RSA leak with Openssl 1.1 (https://github.com/OpenSC/libp11/pull/246) Memcheck:Leak @@ -90,3 +98,19 @@ fun:ostree_sysroot_simple_write_deployment ... } +{ + asn1c ber_decode issues + Memcheck:Cond + ... + fun:CHOICE_decode_ber + fun:ber_decode + ... +} +{ + asn1c ber_decode issues + Memcheck:Value8 + ... + fun:CHOICE_decode_ber + fun:ber_decode + ... +} diff --git a/tests/aktualizr_cycle_simple.cc b/tests/aktualizr_cycle_simple.cc index 0cd11e33a8..3a0f144aae 100644 --- a/tests/aktualizr_cycle_simple.cc +++ b/tests/aktualizr_cycle_simple.cc @@ -1,18 +1,16 @@ -#include - -#include +#include #include #include #include -#include "config/config.h" +#include "libaktualizr/aktualizr.h" +#include "libaktualizr/config.h" #include "logging/logging.h" -#include "primary/aktualizr.h" #include "storage/sqlstorage.h" int updateOneCycle(const boost::filesystem::path &storage_dir, const std::string &server) { Config conf; - conf.pacman.type = PackageManager::kNone; + conf.pacman.type = PACKAGE_MANAGER_NONE; conf.pacman.fake_need_reboot = true; conf.provision.device_id = "device_id"; conf.provision.ecu_registration_endpoint = server + "/director/ecus"; diff --git a/tests/authenticate_cred_test.cc b/tests/authenticate_cred_test.cc new file mode 100644 index 0000000000..2d39230b09 --- /dev/null +++ b/tests/authenticate_cred_test.cc @@ -0,0 +1,62 @@ +#include + +#include + +#include +#include + +#include "authenticate.h" +#include "server_credentials.h" +#include "test_utils.h" +#include "treehub_server.h" +#include "utilities/utils.h" + +boost::filesystem::path 
good_test_cred; +boost::filesystem::path good_auth_json; + +/* Authenticate with OAuth2. + * Parse authentication information from treehub.json. */ +TEST(authenticate, good_zip) { + // Authenticates with the ATS portal to the SaaS instance. + ServerCredentials creds(good_test_cred); + EXPECT_EQ(creds.GetMethod(), AuthMethod::kOauth2); + TreehubServer treehub; + int r = authenticate("", creds, treehub); + EXPECT_EQ(0, r); +} + +/* Extract credentials from a provided JSON file. */ +TEST(authenticate, good_json) { + // Authenticates with the ATS portal to the SaaS instance. + // Outdated. we can probably get rid of the whole json-only authentication at this point. T + // he last time that was officially supported was over three years ago(2017) + // and it's been "deprecated" ever since. + TreehubServer treehub; + int r = authenticate("", ServerCredentials(good_auth_json), treehub); + EXPECT_EQ(0, r); +} + +#ifndef __NO_MAIN__ +int main(int argc, char **argv) { + std::string cmd_output; + ::testing::InitGoogleTest(&argc, argv); + if (argc != 2) { + std::cerr << "Error: " << argv[0] << " requires the path to the credential.zip.\n"; + return EXIT_FAILURE; + } + good_test_cred = argv[1]; + + // prepare auth_test_good.json, use treehub.json from SOTA_PACKED_CREDENTIALS + // extract treehub.json from and save it as auth_test_good.json. 
+ TemporaryDirectory tmp_data_dir; + good_auth_json = tmp_data_dir / "auth_test_good.json"; + auto shell_cmd = std::string("unzip -p ") + argv[1] + std::string(" treehub.json >") + good_auth_json.string(); + if (Utils::shell(shell_cmd, &cmd_output) != 0) { + return -1; + } + + return RUN_ALL_TESTS(); +} +#endif + +// vim: set tabstop=2 shiftwidth=2 expandtab: diff --git a/tests/config/aktualizr_secondary.toml b/tests/config/aktualizr_secondary.toml index 845061faa1..4f4035a21c 100644 --- a/tests/config/aktualizr_secondary.toml +++ b/tests/config/aktualizr_secondary.toml @@ -1,11 +1,11 @@ [network] port = 9031 -[storage] -type = "sqlite" - [pacman] os = "testos" sysroot = "testsysroot" ostree_server = "test_server" packages_file = "/test_packages" + +[uptane] +verification_type = "Full" diff --git a/tests/config/basic.toml b/tests/config/basic.toml index fc9fa7f1ba..5c0b068521 100644 --- a/tests/config/basic.toml +++ b/tests/config/basic.toml @@ -1,5 +1,6 @@ [provision] provision_path = "tests/test_data/cred.zip" +mode = "SharedCredReuse" [tls] server = "https://7d0a4914-c392-4ccd-a8f9-3d4ed969da07.tcpgw.prod01.advancedtelematic.com:8000" diff --git a/tests/config/device_id.toml b/tests/config/device_id.toml index 62e104d4f6..749be7c100 100644 --- a/tests/config/device_id.toml +++ b/tests/config/device_id.toml @@ -3,4 +3,5 @@ key_type = "ED25519" [provision] provision_path = "tests/test_data/cred.zip" +mode = "SharedCredReuse" device_id = "test-name-123" diff --git a/tests/device_cred_prov_hsm_test.py b/tests/device_cred_prov_hsm_test.py index 38285a9598..c1d48de273 100755 --- a/tests/device_cred_prov_hsm_test.py +++ b/tests/device_cred_prov_hsm_test.py @@ -47,7 +47,6 @@ def main(): key_source = "pkcs11" [storage] -type = "sqlite" path = "{tmp_dir}" [import] diff --git a/tests/device_cred_prov_test.py b/tests/device_cred_prov_test.py index 7b3ac0964c..45a1303f43 100755 --- a/tests/device_cred_prov_test.py +++ b/tests/device_cred_prov_test.py @@ -31,7 +31,6 @@ def 
main(): [storage] path = "{tmp_dir}" -type = "sqlite" [import] base_path = "{tmp_dir}/import" diff --git a/tests/fake_discovery/discovery_secondary.py b/tests/fake_discovery/discovery_secondary.py index eaa56640e9..b3b003bac4 100755 --- a/tests/fake_discovery/discovery_secondary.py +++ b/tests/fake_discovery/discovery_secondary.py @@ -5,7 +5,7 @@ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) -server_address = ('127.0.0.1', int(sys.argv[1])) +server_address = ('localhost', int(sys.argv[1])) print('starting up on {} port {}'.format(*server_address)) sock.bind(server_address) diff --git a/tests/fake_http_server/ca.crt b/tests/fake_http_server/ca.crt deleted file mode 100644 index 6e8c15e817..0000000000 --- a/tests/fake_http_server/ca.crt +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICMjCCAdigAwIBAgIJAIBVMAWe6TIAMAoGCCqGSM49BAMCMGwxCzAJBgNVBAYT -AkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEVMBMGA1UECgwM -VGVzdCBST09UIENBMRAwDgYDVQQLDAdST09UIENBMRIwEAYDVQQDDAlsb2NhbGhv -c3QwHhcNMTkwNjI0MTI1NjUyWhcNMzkwNjE5MTI1NjUyWjBsMQswCQYDVQQGEwJH -QjEPMA0GA1UECAwGTG9uZG9uMQ8wDQYDVQQHDAZMb25kb24xFTATBgNVBAoMDFRl -c3QgUk9PVCBDQTEQMA4GA1UECwwHUk9PVCBDQTESMBAGA1UEAwwJbG9jYWxob3N0 -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEKC0kxvxrMlZhtqwtW16m2fxXLzSF -IkvsxffUWH33+2FRXDDz2IBFWkFOISWW7uaINZxAZ/VnMLgJxlHI+Ilgk6NjMGEw -HQYDVR0OBBYEFJloSlj2VR0GgKNMKs9+4Z+d5xtlMB8GA1UdIwQYMBaAFJloSlj2 -VR0GgKNMKs9+4Z+d5xtlMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGG -MAoGCCqGSM49BAMCA0gAMEUCIQCHSmp1KEdPXesAZ89mmK87i+fYtqsOFig5O3tP -iYDjQgIgE+7JkDn2W72YMryIRgsmFRgvdy/jX6KXdKnlXuQC9po= ------END CERTIFICATE----- diff --git a/tests/fake_http_server/fake_test_server.py b/tests/fake_http_server/fake_test_server.py index 3fdfa900d7..9721686912 100755 --- a/tests/fake_http_server/fake_test_server.py +++ b/tests/fake_http_server/fake_test_server.py @@ -45,27 +45,29 @@ def _serve_simple(self, uri): def serve_meta(self, uri): if self.server.meta_path is None: raise RuntimeError("Please 
supply a path for metadata") - self._serve_simple(self.server.meta_path + uri) + if not os.path.exists(self.server.meta_path + uri): + self.send_response(404) + self.end_headers() + else: + self.send_response(200) + self.end_headers() + self._serve_simple(self.server.meta_path + uri) def serve_target(self, filename): if self.server.target_path is None: raise RuntimeError("Please supply a path for targets") + self.send_response(200) + self.end_headers() self._serve_simple(self.server.target_path + filename) def do_GET(self): if self.path.startswith("/director/") and self.path.endswith(".json"): - self.send_response(200) - self.end_headers() role = self.path[len("/director/"):] self.serve_meta("/repo/director/" + role) elif self.path.startswith("/repo/") and self.path.endswith(".json"): - self.send_response(200) - self.end_headers() role = self.path[len("/repo/"):] self.serve_meta('/repo/repo/' + role) elif self.path.startswith("/repo/targets"): - self.send_response(200) - self.end_headers() filename = self.path[len("/repo/targets"):] self.serve_target(filename) @@ -114,6 +116,8 @@ def do_GET(self): for i in range(5): self.wfile.write(b'aa') sleep(1) + elif self.path == '/campaigner/campaigns': + self.serve_meta("/campaigns.json") elif self.path == '/user_agent': user_agent = self.headers.get('user-agent') self.send_response(200) diff --git a/tests/fake_http_server/tls_noauth_server.py b/tests/fake_http_server/tls_noauth_server.py deleted file mode 100755 index 1b35eb4220..0000000000 --- a/tests/fake_http_server/tls_noauth_server.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/python3 -from http.server import HTTPServer,SimpleHTTPRequestHandler -import socket -import ssl - -class ReUseHTTPServer(HTTPServer): - def server_bind(self): - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - HTTPServer.server_bind(self) - -httpd = ReUseHTTPServer(('localhost', 2443), SimpleHTTPRequestHandler) -httpd.socket = ssl.wrap_socket (httpd.socket, - 
certfile='tests/fake_http_server/server.crt', - keyfile='tests/fake_http_server/server.key', - server_side=True, - ca_certs = "tests/fake_http_server/ca.crt") -httpd.serve_forever() diff --git a/tests/fake_http_server/tls_server.py b/tests/fake_http_server/tls_server.py deleted file mode 100755 index 033d337255..0000000000 --- a/tests/fake_http_server/tls_server.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/python3 -from http.server import HTTPServer,SimpleHTTPRequestHandler -import socket -import ssl - -class ReUseHTTPServer(HTTPServer): - def server_bind(self): - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - HTTPServer.server_bind(self) - -httpd = ReUseHTTPServer(('localhost', 1443), SimpleHTTPRequestHandler) -httpd.socket = ssl.wrap_socket (httpd.socket, - certfile='tests/fake_http_server/server.crt', - keyfile='tests/fake_http_server/server.key', - server_side=True, - cert_reqs = ssl.CERT_REQUIRED, - ca_certs = "tests/fake_http_server/ca.crt") -httpd.serve_forever() diff --git a/tests/glib.supp b/tests/glib.supp index 4986f00669..c47fe883b0 100644 --- a/tests/glib.supp +++ b/tests/glib.supp @@ -341,6 +341,12 @@ ... fun:g_private_set } +{ + g_private_set_alloc0 + Memcheck:Leak + ... 
+ fun:g_private_set_alloc0 +} { g_static_mutex_get_mutex_impl Memcheck:Leak diff --git a/tests/httpfake.h b/tests/httpfake.h index ec0f0724b7..64d038510d 100644 --- a/tests/httpfake.h +++ b/tests/httpfake.h @@ -17,21 +17,19 @@ #include "metafake.h" #include "utilities/utils.h" -enum class ProvisioningResult { kOK, kFailure }; - class HttpFake : public HttpInterface { public: // old style HttpFake with centralized multi repo and url rewriting - HttpFake(const boost::filesystem::path &test_dir_in, std::string flavor = "", - const boost::filesystem::path &meta_dir_in = "") - : test_dir(test_dir_in), flavor_(std::move(flavor)), meta_dir(meta_dir_in) { + explicit HttpFake(boost::filesystem::path test_dir_in, std::string flavor = "", + boost::filesystem::path meta_dir_in = "") + : test_dir(std::move(test_dir_in)), flavor_(std::move(flavor)), meta_dir(std::move(meta_dir_in)) { if (meta_dir.empty()) { meta_dir = temp_meta_dir.Path(); MetaFake meta(meta_dir); } } - virtual ~HttpFake() {} + ~HttpFake() override = default; void setCerts(const std::string &ca, CryptoSource ca_source, const std::string &cert, CryptoSource cert_source, const std::string &pkey, CryptoSource pkey_source) override { @@ -93,22 +91,30 @@ class HttpFake : public HttpInterface { } } + HttpResponse post(const std::string &url, const std::string &content_type, const std::string &data) override { + (void)url; + (void)content_type; + (void)data; + return HttpResponse({}, 200, CURLE_OK, ""); + } + HttpResponse post(const std::string &url, const Json::Value &data) override { if (url.find("/devices") != std::string::npos || url.find("/director/ecus") != std::string::npos || url.empty()) { - LOG_ERROR << "OK create device"; Utils::writeFile((test_dir / "post.json").string(), data); - if (provisioningResponse == ProvisioningResult::kOK) { - return HttpResponse(Utils::readFile("tests/test_data/cred.p12"), 200, CURLE_OK, ""); - } else { - return HttpResponse("", 400, CURLE_OK, ""); - } + return 
HttpResponse(Utils::readFile("tests/test_data/cred.p12"), 200, CURLE_OK, ""); } else if (url.find("/events") != std::string::npos) { return handle_event(url, data); } - return HttpResponse("", 400, CURLE_OK, ""); } + HttpResponse put(const std::string &url, const std::string &content_type, const std::string &data) override { + (void)url; + (void)content_type; + (void)data; + return HttpResponse({}, 200, CURLE_OK, ""); + } + HttpResponse put(const std::string &url, const Json::Value &data) override { last_manifest = data; return HttpResponse(url, 200, CURLE_OK, ""); @@ -130,7 +136,7 @@ class HttpFake : public HttpInterface { auto resp_future = resp_promise.get_future(); std::thread( [path, write_cb, progress_cb, userp, url](std::promise promise) { - std::string content = Utils::readFile(path.string()); + const std::string content = Utils::readFile(path.string()); for (unsigned int i = 0; i < content.size(); ++i) { write_cb(const_cast(&content[i]), 1, 1, userp); progress_cb(userp, 0, 0, 0, 0); @@ -152,7 +158,6 @@ class HttpFake : public HttpInterface { } const std::string tls_server = "https://tlsserver.com"; - ProvisioningResult provisioningResponse{ProvisioningResult::kOK}; Json::Value last_manifest; protected: @@ -160,16 +165,6 @@ class HttpFake : public HttpInterface { std::string flavor_; boost::filesystem::path meta_dir; TemporaryDirectory temp_meta_dir; - - private: - /** - * These are here to catch a common programming error where a Json::Value is - * implicitly constructed from a std::string. By having an private overload - * that takes string (and with no implementation), this will fail during - * compilation. 
- */ - HttpResponse post(const std::string &url, const std::string data); - HttpResponse put(const std::string &url, const std::string data); }; #endif // HTTPFAKE_H_ diff --git a/tests/ipsecondary_ostree_test.py b/tests/ipsecondary_ostree_test.py new file mode 100755 index 0000000000..a2edced93e --- /dev/null +++ b/tests/ipsecondary_ostree_test.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 + +import argparse +import logging +import time + +from os import getcwd, chdir, path + +from test_fixtures import with_aktualizr, with_uptane_backend, KeyStore, with_secondary, with_treehub,\ + with_sysroot, with_director, TestRunner, IPSecondary + +logger = logging.getLogger("IPSecondaryOstreeTest") + + +@with_treehub() +@with_uptane_backend() +@with_director() +@with_sysroot() +@with_secondary(start=False, output_logs=True) +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_secondary_ostree_update(uptane_repo, secondary, aktualizr, treehub, sysroot, director, **kwargs): + """Test Secondary OSTree update if a boot order of Secondary and Primary is undefined""" + + target_rev = treehub.revision + expected_targetname = uptane_repo.add_ostree_target(secondary.id, target_rev, "GARAGE_TARGET_NAME") + + with secondary: + with aktualizr: + aktualizr.wait_for_completion() + + pending_rev = aktualizr.get_current_pending_image_info(secondary.id) + + if pending_rev != target_rev: + logger.error("Pending version {} != the target one {}".format(pending_rev, target_rev)) + return False + + sysroot.update_revision(pending_rev) + secondary.emulate_reboot() + + aktualizr.set_mode('full') + with aktualizr: + with secondary: + director.wait_for_install() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + installed_rev = aktualizr.get_current_image_info(secondary.id) + + if installed_rev != target_rev: + logger.error("Installed version {} != the target one {}".format(installed_rev, target_rev)) + return False 
+ + if expected_targetname != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error( + "Target name doesn't match a filepath value of the reported manifest: expected: {}, actual: {}". + format(expected_targetname, director.get_ecu_manifest_filepath(secondary.id[1]))) + return False + + return True + + +@with_treehub() +@with_uptane_backend() +@with_director() +@with_sysroot() +@with_secondary(start=False, output_logs=False, force_reboot=True) +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_secondary_ostree_reboot(uptane_repo, secondary, aktualizr, treehub, sysroot, director, **kwargs): + """Test Secondary OSTree update with automatic (forced) reboot""" + target_rev = treehub.revision + uptane_repo.add_ostree_target(secondary.id, target_rev, "GARAGE_TARGET_NAME") + + with secondary: + with aktualizr: + aktualizr.wait_for_completion() + secondary.wait_for_completion() + + pending_rev = aktualizr.get_current_pending_image_info(secondary.id) + + if pending_rev != target_rev: + logger.error("Pending version {} != the target one {}".format(pending_rev, target_rev)) + return False + + sysroot.update_revision(pending_rev) + + aktualizr.set_mode('full') + with aktualizr: + with secondary: + director.wait_for_install() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + installed_rev = aktualizr.get_current_image_info(secondary.id) + + if installed_rev != target_rev: + logger.error("Installed version {} != the target one {}".format(installed_rev, target_rev)) + return False + + return True + + +# test suit runner +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test IP Secondary with OSTree') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + + input_params = parser.parse_args() + + 
KeyStore.base_dir = path.abspath(input_params.src_dir) + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_secondary_ostree_update, + test_secondary_ostree_reboot, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/ipsecondary_rotation_test.py b/tests/ipsecondary_rotation_test.py new file mode 100755 index 0000000000..a431b9b266 --- /dev/null +++ b/tests/ipsecondary_rotation_test.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python3 + +import argparse +import logging +import time + +from os import getcwd, chdir, path + +from test_fixtures import with_aktualizr, with_uptane_backend, KeyStore, with_secondary, with_treehub,\ + with_sysroot, with_director, TestRunner, IPSecondary + +logger = logging.getLogger("IPSecondaryRotationTest") + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False, output_logs=True) +@with_aktualizr(start=False, output_logs=True) +def test_secondary_director_root_rotation(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update after rotating the Director Root twice''' + + # add a new image to the repo in order to update the Secondary with it and + # thus get initial metadata stored in the Secondary. + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + # Rotate Director Root twice to make sure putRoot will be used. + uptane_repo.rotate_root(is_director=True) + uptane_repo.rotate_root(is_director=True) + + # Add another image to the repo in order to update the Secondary with it + # and thus send metadata again. + uptane_repo.clear_targets() + secondary_image_filename = "another_secondary_image.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + return True + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False, output_logs=True) +@with_aktualizr(start=False, output_logs=True) +def test_secondary_image_root_rotation(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update after rotating the Image repo Root twice''' + + # add a new image to the repo in order to update the Secondary with it and + # thus get initial metadata stored in the Secondary. + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + # Rotate Director Root twice to make sure putRoot will be used. + uptane_repo.rotate_root(is_director=False) + uptane_repo.rotate_root(is_director=False) + + # Add another image to the repo in order to update the Secondary with it + # and thus send metadata again. + uptane_repo.clear_targets() + secondary_image_filename = "another_secondary_image.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + return True + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False, output_logs=True, verification_type="Tuf") +@with_aktualizr(start=False, output_logs=True) +def test_secondary_tuf_director_root_rotation(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update with TUF verification after rotating the Director Root twice''' + + # add a new image to the repo in order to update the Secondary with it and + # thus get initial metadata stored in the Secondary. + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + # Rotate Director Root twice. Note that the Secondary won't care. + uptane_repo.rotate_root(is_director=True) + uptane_repo.rotate_root(is_director=True) + + # Add another image to the repo in order to update the Secondary with it + # and thus send metadata again. + uptane_repo.clear_targets() + secondary_image_filename = "another_secondary_image.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename, custom_version='2') + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + return True + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False, output_logs=True, verification_type="Tuf") +@with_aktualizr(start=False, output_logs=True) +def test_secondary_tuf_image_root_rotation(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update with TUF verification after rotating the Image repo Root twice''' + + # add a new image to the repo in order to update the Secondary with it and + # thus get initial metadata stored in the Secondary. + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + # Rotate Director Root twice to make sure putRoot will be used. + uptane_repo.rotate_root(is_director=False) + uptane_repo.rotate_root(is_director=False) + + # Add another image to the repo in order to update the Secondary with it + # and thus send metadata again. + uptane_repo.clear_targets() + secondary_image_filename = "another_secondary_image.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename, custom_version='2') + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + return True + + +# test suit runner +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test IP Secondary Root rotation') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + + input_params = parser.parse_args() + + KeyStore.base_dir = path.abspath(input_params.src_dir) + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_secondary_director_root_rotation, + test_secondary_image_root_rotation, + test_secondary_tuf_director_root_rotation, + test_secondary_tuf_image_root_rotation, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/ipsecondary_test.py b/tests/ipsecondary_test.py index daf903c83f..296890df72 100755 --- a/tests/ipsecondary_test.py +++ b/tests/ipsecondary_test.py @@ -1,23 +1,24 @@ #!/usr/bin/env python3 -import logging import argparse +import logging +import time -from os import getcwd, chdir +from os import getcwd, chdir, path -from test_fixtures import with_aktualizr, 
with_uptane_backend, KeyStore, with_secondary +from test_fixtures import with_aktualizr, with_uptane_backend, KeyStore, with_secondary, with_treehub,\ + with_sysroot, with_director, TestRunner, IPSecondary logger = logging.getLogger("IPSecondaryTest") -# The following is a test suit intended for IP Secondary integration testing @with_uptane_backend() @with_secondary(start=True) @with_aktualizr(start=False, output_logs=False) def test_secondary_update_if_secondary_starts_first(uptane_repo, secondary, aktualizr, **kwargs): '''Test Secondary update if Secondary is booted before Primary''' - # add a new image to the repo in order to update the secondary with it + # add a new image to the repo in order to update the Secondary with it secondary_image_filename = "secondary_image_filename_001.img" secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) @@ -25,7 +26,7 @@ def test_secondary_update_if_secondary_starts_first(uptane_repo, secondary, aktu format(secondary.id, (secondary_image_hash, secondary_image_filename))) with aktualizr: - # run aktualizr once, secondary has been already running + # run aktualizr once, Secondary has been already running aktualizr.wait_for_completion() test_result = secondary_image_hash == aktualizr.get_current_image_info(secondary.id) @@ -39,14 +40,14 @@ def test_secondary_update_if_secondary_starts_first(uptane_repo, secondary, aktu def test_secondary_update_if_primary_starts_first(uptane_repo, secondary, aktualizr, **kwargs): '''Test Secondary update if Secondary is booted after Primary''' - # add a new image to the repo in order to update the secondary with it + # add a new image to the repo in order to update the Secondary with it secondary_image_filename = "secondary_image_filename_001.img" secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) logger.debug("Trying to update ECU {} with the image {}". 
format(secondary.id, (secondary_image_hash, secondary_image_filename))) with secondary: - # start secondary, aktualizr has been already started in 'once' mode + # start Secondary, aktualizr has been already started in 'once' mode aktualizr.wait_for_completion() test_result = secondary_image_hash == aktualizr.get_current_image_info(secondary.id) @@ -55,78 +56,388 @@ def test_secondary_update_if_primary_starts_first(uptane_repo, secondary, aktual @with_uptane_backend() -@with_secondary(start=False) -@with_aktualizr(start=False, output_logs=False) -def test_secondary_update(uptane_repo, secondary, aktualizr, **kwargs): - '''Test Secondary update if a boot order of Secondary and Primary is undefined''' - - test_result = True - number_of_updates = 1 - ii = 0 - while ii < number_of_updates and test_result: - # add a new image to the repo in order to update the secondary with it - secondary_image_filename = "secondary_image_filename_{}.img".format(ii) - secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) - - logger.debug("Trying to update ECU {} with the image {}". - format(secondary.id, (secondary_image_hash, secondary_image_filename))) - - # start Secondary and Aktualizr processes, aktualizr is started in 'once' mode - with secondary, aktualizr: +@with_director() +@with_secondary(start=False, output_logs=True) +@with_aktualizr(start=False, output_logs=True) +def test_secondary_update(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update if the boot order of Secondary and Primary is undefined''' + + # add a new image to the repo in order to update the Secondary with it + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + # check updated file + update_file = path.join(secondary.storage_dir.name, "firmware.txt") + if not path.exists(update_file): + logger.error("Expected updated file does not exist: {}".format(update_file)) + return False + + if secondary_image_filename != director.get_ecu_manifest_filepath(secondary.id[1]): + logger.error("Target name doesn't match a filepath value of the reported manifest: {}".format(director.get_manifest())) + return False + + return True + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False, output_logs=True, verification_type="Tuf") +@with_aktualizr(start=False, output_logs=True) +def test_secondary_tuf_update(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test Secondary update with TUF verification''' + + # add a new image to the repo in order to update the Secondary with it + secondary_image_filename = "secondary_image_filename.img" + secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + logger.debug("Trying to update ECU {} with the image {}". 
+ format(secondary.id, (secondary_image_hash, secondary_image_filename))) + + # start Secondary and aktualizr processes, aktualizr is started in 'once' mode + with secondary, aktualizr: + aktualizr.wait_for_completion() + + # check currently installed hash + if secondary_image_hash != aktualizr.get_current_image_info(secondary.id): + logger.error("Target image hash doesn't match the currently installed hash") + return False + + # check updated file + update_file = path.join(secondary.storage_dir.name, "firmware.txt") + if not path.exists(update_file): + logger.error("Expected updated file does not exist: {}".format(update_file)) + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_aktualizr(start=False, output_logs=True) +def test_add_secondary(uptane_repo, secondary, aktualizr, **kwargs): + '''Test adding a Secondary after registration''' + + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id): + logger.error("Secondary ECU is not registered.") + return False + + with IPSecondary(output_logs=False, id=('hwid1', 'serial2')) as secondary2: + # Why is this necessary? The Primary waiting works outside of this test. 
+ time.sleep(5) + aktualizr.add_secondary(secondary2) + with aktualizr: aktualizr.wait_for_completion() - test_result = secondary_image_hash == aktualizr.get_current_image_info(secondary.id) - logger.debug("Update result: {}".format("success" if test_result else "failed")) - ii += 1 + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is not registered.") + return False - return test_result + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_secondary(start=True, id=('hwid1', 'serial2'), output_logs=False, arg_name='secondary2') +@with_aktualizr(start=False, output_logs=True) +def test_remove_secondary(uptane_repo, secondary, secondary2, aktualizr, **kwargs): + '''Test removing a Secondary after registration''' + + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is not registered.") + return False + + aktualizr.remove_secondary(secondary2) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id): + logger.error("Secondary ECU is not registered.") + return False + if aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is unexpectedly still registered.") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_secondary(start=True, id=('hwid1', 'serial2'), output_logs=False, arg_name='secondary2') +@with_aktualizr(start=False, output_logs=True) +def test_replace_secondary(uptane_repo, secondary, secondary2, aktualizr, **kwargs): + '''Test replacing a Secondary after registration''' + + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + 
logger.error("Secondary ECU is not registered.") + return False + + aktualizr.remove_secondary(secondary2) + + with IPSecondary(output_logs=False, id=('hwid1', 'serial3')) as secondary3: + # Why is this necessary? The Primary waiting works outside of this test. + time.sleep(5) + aktualizr.add_secondary(secondary3) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary3.id): + logger.error("Secondary ECU is not registered.") + return False + if aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is unexpectedly still registered.") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_aktualizr(start=False, output_logs=True) +def test_replace_secondary_same_port(uptane_repo, secondary, aktualizr, **kwargs): + '''Test replacing a Secondary that reuses the same port''' + + port = IPSecondary.get_free_port() + with IPSecondary(output_logs=False, id=('hwid1', 'serial2'), port=port) as secondary2: + # Why is this necessary? The Primary waiting works outside of this test. + time.sleep(5) + aktualizr.add_secondary(secondary2) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is not registered.") + return False + + aktualizr.remove_secondary(secondary2) + + with IPSecondary(output_logs=False, id=('hwid1', 'serial3'), port=port) as secondary3: + # Why is this necessary? The Primary waiting works outside of this test. 
+ time.sleep(5) + aktualizr.add_secondary(secondary3) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary3.id): + logger.error("Secondary ECU is not registered.") + return False + if aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is unexpectedly still registered.") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_aktualizr(start=False, output_logs=True) +def test_replace_secondary_same_port_tuf(uptane_repo, secondary, aktualizr, **kwargs): + '''Test replacing a Secondary that reuses the same port but uses TUF verification''' + + port = IPSecondary.get_free_port() + with IPSecondary(output_logs=False, id=('hwid1', 'serial2'), port=port, verification_type="Tuf") as secondary2: + # Why is this necessary? The Primary waiting works outside of this test. + time.sleep(5) + aktualizr.add_secondary(secondary2) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is not registered.") + return False + + aktualizr.remove_secondary(secondary2) + + with IPSecondary(output_logs=False, id=('hwid1', 'serial3'), port=port) as secondary3: + # Why is this necessary? The Primary waiting works outside of this test. 
+ time.sleep(5) + aktualizr.add_secondary(secondary3) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary3.id): + logger.error("Secondary ECU is not registered.") + return False + if aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is unexpectedly still registered.") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=True, id=('hwid1', 'serial1'), output_logs=False) +@with_aktualizr(start=False, output_logs=True) +def test_change_secondary_port(uptane_repo, secondary, aktualizr, **kwargs): + '''Test changing a Secondary's port but not the ECU serial''' + + with IPSecondary(output_logs=False, id=('hwid1', 'serial2')) as secondary2: + # Why is this necessary? The Primary waiting works outside of this test. + time.sleep(5) + aktualizr.add_secondary(secondary2) + with aktualizr: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + logger.error("Secondary ECU is not registered.") + return False + + aktualizr.remove_secondary(secondary2) + + with IPSecondary(output_logs=False, id=('hwid1', 'serial2')) as secondary3: + # Why is this necessary? The Primary waiting works outside of this test. 
+ time.sleep(5) + aktualizr.add_secondary(secondary3) + with aktualizr: + aktualizr.wait_for_completion() + + if secondary2.port == secondary3.port: + logger.error("Secondary ECU port unexpectedly did not change!") + return False + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary3.id): + logger.error("Secondary ECU is not registered.") + return False + + return True + + +@with_uptane_backend() +@with_director() +@with_secondary(start=False) +@with_aktualizr(start=False, secondary_wait_sec=1, output_logs=False) +def test_secondary_install_timeout(uptane_repo, secondary, aktualizr, director, **kwargs): + '''Test that Secondary install fails after a timeout if the Secondary never connects''' + + # run aktualizr and secondary and wait until the device/aktualizr is registered + with aktualizr, secondary: + aktualizr.wait_for_completion() + + # the secondary must be registered + if not aktualizr.is_ecu_registered(secondary.id): + return False + + # make sure that the secondary is not running + if secondary.is_running(): + return False + + # launch an update on secondary without it + secondary_image_filename = "secondary_image_filename_001.img" + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + aktualizr.update_wait_timeout(0.1) + with aktualizr: + aktualizr.wait_for_completion() + + manifest = director.get_manifest() + result_code = manifest["signed"]["installation_report"]["report"]["result"]["code"] + if result_code != "INTERNAL_ERROR": + logger.error("Wrong result code {}".format(result_code)) + return False + + return not director.get_install_result() @with_uptane_backend() @with_secondary(start=False) @with_aktualizr(start=False, output_logs=False, wait_timeout=0.1) def test_primary_timeout_during_first_run(uptane_repo, secondary, aktualizr, **kwargs): - '''Test Aktualizr's timeout of waiting for Secondaries during initial boot''' + """Test Aktualizr's timeout of waiting for 
Secondaries during initial boot""" secondary_image_filename = "secondary_image_filename_001.img" - secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) logger.debug("Checking Aktualizr behaviour if it timeouts while waiting for a connection from the secondary") - # just start the aktualizr and expect that it timeouts on waiting for a connection from the secondary - # so the secondary is not registered at the device and backend + # just start the aktualizr and expect that it timeouts on waiting for a connection from the Secondary + # so the Secondary is not registered at the device and backend with aktualizr: aktualizr.wait_for_completion() - return not aktualizr.is_ecu_registered(secondary.id) + info = aktualizr.get_info() + if info is None: + return False + not_provisioned = 'Provisioned on server: no' in info + + return not_provisioned and not aktualizr.is_ecu_registered(secondary.id) @with_uptane_backend() +@with_director() @with_secondary(start=False) +@with_aktualizr(start=False, output_logs=True) +def test_primary_wait_secondary_install(uptane_repo, secondary, aktualizr, director, **kwargs): + """Test that Primary waits for Secondary to connect before installing""" + + # provision device with a secondary + with secondary, aktualizr: + aktualizr.wait_for_completion() + + secondary_image_filename = "secondary_image_filename.img" + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + + with aktualizr: + time.sleep(10) + with secondary: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=False, output_logs=False) @with_aktualizr(start=False, output_logs=False) def test_primary_timeout_after_device_is_registered(uptane_repo, secondary, aktualizr, 
**kwargs): - '''Test Aktualizr's timeout of waiting for Secondaries after the device/aktualizr was registered at the backend''' + '''Test Aktualizr's timeout of waiting for Secondaries after the device was registered with the backend''' - # run aktualizr and secondary and wait until the device/aktualizr is registered + # run aktualizr and Secondary and wait until the device/aktualizr is registered with aktualizr, secondary: aktualizr.wait_for_completion() - # the secondary must be registered + # the Secondary must be registered if not aktualizr.is_ecu_registered(secondary.id): return False - # make sure that the secondary is not running + # make sure that the Secondary is not running if secondary.is_running(): return False - # run just aktualizr, the previously registered secondary is off - # and check if the primary ECU is updatable if the secondary is not connected + # run just aktualizr, the previously registered Secondary is off + # and check if the Primary ECU is updatable if the Secondary is not connected primary_image_filename = "primary_image_filename_001.img" primary_image_hash = uptane_repo.add_image(id=aktualizr.id, image_filename=primary_image_filename) - # if a new image for the not-connected secondary is specified in the target + # if a new image for the not-connected Secondary is specified in the target # then nothing is going to be updated, including the image intended for - # healthy primary ECU + # healthy Primary ECU # secondary_image_filename = "secondary_image_filename_001.img" # secondary_image_hash = uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) @@ -134,8 +445,97 @@ def test_primary_timeout_after_device_is_registered(uptane_repo, secondary, aktu with aktualizr: aktualizr.wait_for_completion() - return (aktualizr.get_current_primary_image_info() == primary_image_hash)\ - and not aktualizr.is_ecu_registered(secondary.id) + return aktualizr.get_current_primary_image_info() == primary_image_hash + + 
+@with_uptane_backend() +@with_secondary(start=False) +@with_secondary(start=False, arg_name='secondary2') +@with_aktualizr(start=False, output_logs=True) +def test_primary_multiple_secondaries(uptane_repo, secondary, secondary2, aktualizr, **kwargs): + '''Test Aktualizr with multiple IP secondaries''' + + with aktualizr, secondary, secondary2: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + return False + + return True + secondary_image_filename = "secondary_image_filename.img" + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + uptane_repo.add_image(id=secondary2.id, image_filename=secondary_image_filename) + + with aktualizr: + time.sleep(10) + with secondary: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + return True + + +@with_uptane_backend() +@with_secondary(start=False, verification_type="Tuf") +@with_secondary(start=False, verification_type="Tuf", arg_name='secondary2') +@with_aktualizr(start=False, output_logs=True) +def test_primary_multiple_secondaries_tuf(uptane_repo, secondary, secondary2, aktualizr, **kwargs): + '''Test Aktualizr with multiple IP secondaries using TUF verification''' + + with aktualizr, secondary, secondary2: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + return False + + return True + secondary_image_filename = "secondary_image_filename.img" + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + uptane_repo.add_image(id=secondary2.id, image_filename=secondary_image_filename) + + with aktualizr: + time.sleep(10) + with secondary: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + return True 
+ + +@with_uptane_backend() +@with_secondary(start=False, verification_type="Tuf") +@with_secondary(start=False, arg_name='secondary2') +@with_aktualizr(start=False, output_logs=True) +def test_primary_multiple_secondaries_mixed(uptane_repo, secondary, secondary2, aktualizr, **kwargs): + '''Test Aktualizr with multiple IP secondaries, one of which uses TUF verification''' + + with aktualizr, secondary, secondary2: + aktualizr.wait_for_completion() + + if not aktualizr.is_ecu_registered(secondary.id) or not aktualizr.is_ecu_registered(secondary2.id): + return False + + return True + secondary_image_filename = "secondary_image_filename.img" + uptane_repo.add_image(id=secondary.id, image_filename=secondary_image_filename) + uptane_repo.add_image(id=secondary2.id, image_filename=secondary_image_filename) + + with aktualizr: + time.sleep(10) + with secondary: + aktualizr.wait_for_completion() + + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + return True # test suit runner @@ -145,24 +545,35 @@ def test_primary_timeout_after_device_is_registered(uptane_repo, secondary, aktu parser = argparse.ArgumentParser(description='Test IP Secondary') parser.add_argument('-b', '--build-dir', help='build directory', default='build') parser.add_argument('-s', '--src-dir', help='source directory', default='.') + input_params = parser.parse_args() - KeyStore.base_dir = input_params.src_dir + KeyStore.base_dir = path.abspath(input_params.src_dir) initial_cwd = getcwd() chdir(input_params.build_dir) - test_suite = [test_secondary_update_if_secondary_starts_first, - test_secondary_update_if_primary_starts_first, - test_secondary_update, - test_primary_timeout_during_first_run, - test_primary_timeout_after_device_is_registered] - - test_suite_run_result = True - for test in test_suite: - logger.info('>>> Running {}...'.format(test.__name__)) - test_run_result = test() - logger.info('>>> {}: {}'.format('OK' if test_run_result 
else 'Failed', test.__name__)) - test_suite_run_result = test_suite_run_result and test_run_result + test_suite = [ + test_secondary_update_if_secondary_starts_first, + test_secondary_update_if_primary_starts_first, + test_secondary_update, + test_secondary_tuf_update, + test_add_secondary, + test_remove_secondary, + test_replace_secondary, + test_replace_secondary_same_port, + test_replace_secondary_same_port_tuf, + test_change_secondary_port, + test_secondary_install_timeout, + test_primary_timeout_during_first_run, + test_primary_wait_secondary_install, + test_primary_timeout_after_device_is_registered, + test_primary_multiple_secondaries, + test_primary_multiple_secondaries_tuf, + test_primary_multiple_secondaries_mixed, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() chdir(initial_cwd) exit(0 if test_suite_run_result else 1) diff --git a/tests/leak_test.cc b/tests/leak_test.cc index 52f89a5e3e..1a5094bd5b 100644 --- a/tests/leak_test.cc +++ b/tests/leak_test.cc @@ -3,11 +3,15 @@ /** * A test case that leaks memory, to check that we can spot this in valgrind */ -TEST(Leak, ThisTestLeaks) { EXPECT_TRUE(new int[45]); } +TEST(Leak, ThisTestLeaks) { + int* temp = new int[45]; + int temp2 = *temp++; + std::cout << temp2; +} #ifndef __NO_MAIN__ -int main(int argc, char **argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } -#endif \ No newline at end of file +#endif diff --git a/tests/load_tests/fetch-credentials.sh b/tests/load_tests/fetch-credentials.sh deleted file mode 100755 index ffd9d5b93e..0000000000 --- a/tests/load_tests/fetch-credentials.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -uex - -readonly ACCOUNT_ID=${1} -readonly CRYPT_HOST=${2} - -accountUrl="http://${CRYPT_HOST}/accounts/${ACCOUNT_ID}" -readonly hostName=$(http --ignore-stdin ${accountUrl} | jq --raw-output .hostName) - -credentialsId=$(http --ignore-stdin POST 
${accountUrl}/credentials/registration description="load test $(date)" ttl=24 | jq --raw-output .id) - -http --ignore-stdin ${accountUrl}/credentials/registration/${credentialsId} > autoprov_credentials.p12 - -zip -q prov.zip autoprov_credentials.p12 - -echo ${hostName} \ No newline at end of file diff --git a/tests/memory_usage_test.sh b/tests/memory_usage_test.sh deleted file mode 100755 index c6f6fa95dc..0000000000 --- a/tests/memory_usage_test.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -e - -TEMP_DIR=$(mktemp -d)/aktualizr -MEMMORY_LIMIT=10485760 #10 Mib - -trap 'kill %1; rm -rf $TEMP_DIR' EXIT - -PATH=$PATH:"$1/src/uptane_generator/" -./src/uptane_generator/run/create_repo.sh $TEMP_DIR localhost - -TARGET_SIZE="200M" - -mkdir -p $TEMP_DIR/deb/DEBIAN -mkdir -p $TEMP_DIR/deb/usr/share -fallocate -l $TARGET_SIZE $TEMP_DIR/deb/usr/share/target1 - -cat > $TEMP_DIR/deb/DEBIAN/control < -Description: Some fake package - This is fake package. -EOF - -dpkg-deb -Znone -b $TEMP_DIR/deb $TEMP_DIR/good.deb -PATH="tests/test_data/fake_dpkg":$PATH -$1/src/uptane_generator/uptane-generator image --path $TEMP_DIR/uptane --filename $TEMP_DIR/good.deb --targetname good.deb --hwid desktop -$1/src/uptane_generator/uptane-generator addtarget --path $TEMP_DIR/uptane --targetname good.deb --hwid desktop --serial serial1 -$1/src/uptane_generator/uptane-generator signtargets --path $TEMP_DIR/uptane - - -sed -i 's/\[provision\]/\[provision\]\nprimary_ecu_serial = serial1/g' "$TEMP_DIR/sota.toml" -sed -i 's/type = "none"/type = "debian"/g' "$TEMP_DIR/sota.toml" - -./src/uptane_generator/run/serve_repo.py 9000 "$TEMP_DIR" & - -valgrind --tool=massif --massif-out-file=$1/massif.out $1/src/aktualizr_primary/aktualizr --loglevel 1 --config "$TEMP_DIR/sota.toml" --run-mode once - -HEAP_MEMMORY=$(grep -oP 'mem_heap_B=\K.*' $1/massif.out | sort -nr | head -n 1) -echo -n "Heap memory usage is " -echo $HEAP_MEMMORY | numfmt --to=iec-i -if [ "$HEAP_MEMMORY" -gt "$MEMMORY_LIMIT" ]; then - echo 
"Test fail because of memory consumption is greater then $MEMMORY_LIMIT bytes" - exit 1 -fi diff --git a/tests/metafake.h b/tests/metafake.h index 3e093b02fb..8047cb059e 100644 --- a/tests/metafake.h +++ b/tests/metafake.h @@ -3,9 +3,10 @@ #include #include +#include #include -#include +#include #include "logging/logging.h" #include "uptane_repo.h" @@ -13,8 +14,10 @@ class MetaFake { public: - MetaFake(const boost::filesystem::path &meta_dir_in) - : meta_dir(meta_dir_in), work_dir(meta_dir / "fake_meta"), repo(work_dir, "2021-07-04T16:33:27Z", "id0") { + explicit MetaFake(boost::filesystem::path meta_dir_in) + : meta_dir(std::move(meta_dir_in)), + work_dir(meta_dir / "fake_meta"), + repo(work_dir, "2025-07-04T16:33:27Z", "id0") { repo.generateRepo(KeyType::kED25519); backup(); create_image(); @@ -25,54 +28,53 @@ class MetaFake { void create_testData(void) { boost::filesystem::path file_name; std::string hwid; - Delegation delegation; - // add image for "has update" meta + // add image for "has update" metadata file_name = "dummy_firmware.txt"; - repo.addImage(work_dir / file_name, file_name, "dummy", "", delegation); + repo.addImage(work_dir / file_name, file_name, "dummy"); file_name = "primary_firmware.txt"; hwid = "primary_hw"; - repo.addImage(work_dir / file_name, file_name, hwid, "", delegation); - repo.addTarget(file_name.string(), hwid, "CA:FE:A6:D2:84:9D", ""); + repo.addImage(work_dir / file_name, file_name, hwid); + repo.addTarget(file_name.string(), hwid, "CA:FE:A6:D2:84:9D"); file_name = "secondary_firmware.txt"; hwid = "secondary_hw"; - repo.addImage(work_dir / file_name, file_name, hwid, "", delegation); - repo.addTarget(file_name.string(), hwid, "secondary_ecu_serial", ""); + repo.addImage(work_dir / file_name, file_name, hwid); + repo.addTarget(file_name.string(), hwid, "secondary_ecu_serial"); repo.signTargets(); rename("_hasupdates"); - // add image for "no update" meta + // add image for "no update" metadata restore(); file_name = 
"dummy_firmware.txt"; - repo.addImage(work_dir / file_name, file_name, "dummy", "", delegation); + repo.addImage(work_dir / file_name, file_name, "dummy"); repo.signTargets(); rename("_noupdates"); - // add image for "multi secondary ecu" meta + // add image for "multi secondary ecu" metadata restore(); file_name = "dummy_firmware.txt"; - repo.addImage(work_dir / file_name, file_name, "dummy", "", delegation); + repo.addImage(work_dir / file_name, file_name, "dummy"); file_name = "secondary_firmware.txt"; hwid = "sec_hw1"; - repo.addImage(work_dir / file_name, file_name, hwid, "", delegation); - repo.addTarget(file_name.string(), hwid, "sec_serial1", ""); + repo.addImage(work_dir / file_name, file_name, hwid); + repo.addTarget(file_name.string(), hwid, "sec_serial1"); file_name = "secondary_firmware2.txt"; hwid = "sec_hw2"; - repo.addImage(work_dir / file_name, file_name, hwid, "", delegation); - repo.addTarget(file_name.string(), hwid, "sec_serial2", ""); + repo.addImage(work_dir / file_name, file_name, hwid); + repo.addTarget(file_name.string(), hwid, "sec_serial2"); repo.signTargets(); rename("_multisec"); - // copy meta to work_dir + // copy metadata to work_dir Utils::copyDir(work_dir / ImageRepo::dir, meta_dir / "repo"); Utils::copyDir(work_dir / DirectorRepo::dir, meta_dir / "director"); if (!boost::filesystem::exists(meta_dir / "campaigner") && diff --git a/tests/ostree-scripts/makephysical.sh b/tests/ostree-scripts/makephysical.sh index 493a60bb37..9fb9a339f6 100755 --- a/tests/ostree-scripts/makephysical.sh +++ b/tests/ostree-scripts/makephysical.sh @@ -37,11 +37,11 @@ OSTREE_DIR=$(mktemp -d /tmp/ostreephys-XXXXXX) trap 'kill %1' EXIT # Wait for http server to start serving. This can take a while sometimes. 
- until curl 127.0.0.1:"$PORT" &> /dev/null; do + until curl localhost:"$PORT" &> /dev/null; do sleep 0.2 done - ostree --repo="$OSTREE_SYSROOT/ostree/repo" remote add --no-gpg-verify generate-remote "http://127.0.0.1:$PORT" $BRANCHNAME + ostree --repo="$OSTREE_SYSROOT/ostree/repo" remote add --no-gpg-verify generate-remote "http://localhost:$PORT" $BRANCHNAME ostree --repo="$OSTREE_SYSROOT/ostree/repo" pull generate-remote $BRANCHNAME ) diff --git a/tests/ostree_mock.c b/tests/ostree_mock.c index 79aa6dfa97..6360239504 100644 --- a/tests/ostree_mock.c +++ b/tests/ostree_mock.c @@ -5,7 +5,7 @@ #include /** - * @brief This mock allows the ostree package manager usage on non-ostree booted environment, e.g. local host + * @brief This mock allows the OSTree package manager usage on non-OSTree booted environment, e.g. local host * * in order to use it the following has to be pre-defined * 1) OSTREE_DEPLOYMENT_VERSION_FILE environment variable that points to the test file containing a revision diff --git a/tests/prov_test_common.py b/tests/prov_test_common.py index d7823a1dce..973d7291ac 100644 --- a/tests/prov_test_common.py +++ b/tests/prov_test_common.py @@ -8,7 +8,7 @@ def verify_provisioned(akt_info, conf): stdout, stderr, retcode = run_subprocess([str(akt_info), '--config', str(conf), '--wait-until-provisioned']) machine = platform.node() if (b'Device ID: ' not in stdout or - b'Primary ecu hardware ID: ' + machine.encode() not in stdout or + b'Primary ECU hardware ID: ' + machine.encode() not in stdout or b'Fetched metadata: yes' not in stdout): print('Provisioning failed: ' + stderr.decode() + stdout.decode()) return 1 diff --git a/tests/run_certprovider_test.sh b/tests/run_certprovider_test.sh index ee49693f0c..fe1b81292f 100755 --- a/tests/run_certprovider_test.sh +++ b/tests/run_certprovider_test.sh @@ -1,9 +1,9 @@ #!/usr/bin/env bash DIR=$(mktemp -d) -mkdir -p ${DIR} +mkdir -p "${DIR}" -$1 --credentials $2 --device-ca $3 --device-ca-key $4 --local ${DIR} 
-openssl verify -CAfile $3 ${DIR}/client.pem +$1 --credentials "$2" --device-ca "$3" --device-ca-key "$4" --local "${DIR}" +openssl verify -CAfile "$3" "${DIR}"/client.pem -rm -r ${DIR} +rm -r "${DIR}" diff --git a/tests/run_debian_tests.sh b/tests/run_debian_tests.sh deleted file mode 100755 index a17e142f47..0000000000 --- a/tests/run_debian_tests.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -set -e -PATH="$2":$PATH - -exec $1 \ No newline at end of file diff --git a/tests/run_expired_test.sh b/tests/run_expired_test.sh index f3210157b6..be2f338350 100755 --- a/tests/run_expired_test.sh +++ b/tests/run_expired_test.sh @@ -2,12 +2,12 @@ set -xeuo pipefail TEMP_DIR=/tmp/temp_aktualizr_expire_repo/$(mktemp -d)/$1 -"$2/src/uptane_generator/uptane-generator" generate $TEMP_DIR --expires=$1 -"$2/src/uptane_generator/uptane-generator" image $TEMP_DIR ./tests/test_data/credentials.zip --hwid test_hwid +"$2/src/uptane_generator/uptane-generator" generate "$TEMP_DIR" --expires="$1" +"$2/src/uptane_generator/uptane-generator" image "$TEMP_DIR" ./tests/test_data/credentials.zip --hwid test_hwid -cp ./tests/test_data/credentials.zip $TEMP_DIR +cp ./tests/test_data/credentials.zip "$TEMP_DIR" -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') TREEHUB="{\ \"oauth2\": { \"server\": \"http://localhost:$PORT\",\ @@ -19,20 +19,20 @@ TREEHUB="{\ }\ }" -echo $TREEHUB > $TEMP_DIR/treehub.json -echo "http://localhost:$PORT" > $TEMP_DIR/tufrepo.url +echo "$TREEHUB" > "$TEMP_DIR"/treehub.json +echo "http://localhost:$PORT" > "$TEMP_DIR"/tufrepo.url -pushd $TEMP_DIR -zip -r $TEMP_DIR/credentials.zip treehub.json tufrepo.url +pushd "$TEMP_DIR" +zip -r "$TEMP_DIR"/credentials.zip treehub.json tufrepo.url popd function finish { kill %1 - rm -rf $TEMP_DIR + rm -rf "$TEMP_DIR" } 
-./tests/fake_http_server/fake_api_server.py $PORT $TEMP_DIR & +./tests/fake_http_server/fake_api_server.py "$PORT" "$TEMP_DIR" & trap finish EXIT sleep 2 -"$2/src/sota_tools/garage-check" -j $TEMP_DIR/credentials.zip -r 714581b2ffbbf7a750cb0a210fa7d74fd9128bd627cd4913e365d5bf2f66eba9 +"$2/src/sota_tools/garage-check" -j "$TEMP_DIR"/credentials.zip -r 714581b2ffbbf7a750cb0a210fa7d74fd9128bd627cd4913e365d5bf2f66eba9 diff --git a/tests/run_import_clash_test.sh b/tests/run_import_clash_test.sh index 11fd2123c1..af2ca5cae3 100755 --- a/tests/run_import_clash_test.sh +++ b/tests/run_import_clash_test.sh @@ -1,21 +1,21 @@ #!/usr/bin/env bash get_variable() { - sed -ne "/\[import\]/,/\[.*\]/s/$1\s*=\s*\"\(.*\)\".*\$/\1/p" < $2 + sed -ne "/\[import\]/,/\[.*\]/s/$1\s*=\s*\"\(.*\)\".*\$/\1/p" < "$2" } -for f in $1/*.toml; do - if [[ $( get_variable tls_cacert_path $f ) = "/var/sota/root.crt" ]]; then +for f in "$1"/*.toml; do + if [[ $( get_variable tls_cacert_path "$f" ) = "/var/sota/root.crt" ]]; then echo "import.tls_cacert_path in $f is the same as path for FS->SQL migration (/var/sota/root.crt)" >&2 exit 1; fi - if [[ $(get_variable tls_pkey_path $f) = "/var/sota/pkey.pem" ]]; then + if [[ $(get_variable tls_pkey_path "$f") = "/var/sota/pkey.pem" ]]; then echo "import.tls_pkey_path in $f is the same as path for FS->SQL migration (/var/sota/pkey)" >&2 exit 1; fi - if [[ $(get_variable tls_clientcert_path $f) = "/var/sota/client.pem" ]]; then + if [[ $(get_variable tls_clientcert_path "$f") = "/var/sota/client.pem" ]]; then echo "import.tls_clientcert_path in $f is the same as path for FS->SQL migration (/var/sota/client.pem)" >&2 exit 1; fi diff --git a/tests/run_vector_tests.sh b/tests/run_vector_tests.sh index afa54e593a..6857f13803 100755 --- a/tests/run_vector_tests.sh +++ b/tests/run_vector_tests.sh @@ -29,6 +29,7 @@ if [ ! -f venv/bin/activate ]; then python3 -m venv venv fi +# shellcheck disable=SC1091 . 
venv/bin/activate TTV_DIR="$TESTS_SRC_DIR/tuf-test-vectors" @@ -59,9 +60,9 @@ while ! curl -I -s -f "http://localhost:$PORT"; do done if [[ -n $VALGRIND ]]; then - "$VALGRIND" "$UPTANE_VECTOR_TEST" "$PORT" "$@" + "$VALGRIND" "$UPTANE_VECTOR_TEST" "$PORT" "$TESTS_SRC_DIR" "$@" else - "$UPTANE_VECTOR_TEST" "$PORT" "$@" + "$UPTANE_VECTOR_TEST" "$PORT" "$TESTS_SRC_DIR" "$@" fi RES=$? diff --git a/tests/shared_cred_prov_test.py b/tests/shared_cred_prov_test.py index 0316e9f334..6b98e39bd7 100755 --- a/tests/shared_cred_prov_test.py +++ b/tests/shared_cred_prov_test.py @@ -28,10 +28,10 @@ def main(): [provision] provision_path = "{creds}" +mode = "SharedCredReuse" [storage] path = "{tmp_dir}" -type = "sqlite" sqldb_path = "{db}" ''' diff --git a/tests/sota_tools/auth_test_good.json b/tests/sota_tools/auth_test_good.json deleted file mode 100644 index a7888130dc..0000000000 --- a/tests/sota_tools/auth_test_good.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "oauth2": { - "server": "https://auth-plus.atsgarage.com", - "client_id": "045dd735-061e-4b45-8352-1c517b0225a0", - "client_secret": "6QrLTSOmnZ" - }, - "ostree": { - "server": "https://treehub.atsgarage.com/api/v3" - } -} \ No newline at end of file diff --git a/tests/sota_tools/auth_test_good.zip b/tests/sota_tools/auth_test_good.zip deleted file mode 100644 index 35cd443b0c..0000000000 Binary files a/tests/sota_tools/auth_test_good.zip and /dev/null differ diff --git a/tests/sota_tools/authentication/generate-certs.sh b/tests/sota_tools/authentication/generate-certs.sh new file mode 100755 index 0000000000..baf7ad25af --- /dev/null +++ b/tests/sota_tools/authentication/generate-certs.sh @@ -0,0 +1,88 @@ +#!/bin/bash +set -eEuo pipefail + +if [ "$#" -lt 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +DEST_DIR="$1" + +mkdir -p "$DEST_DIR" +trap 'rm -rf "$DEST_DIR"' ERR +cd "$DEST_DIR" + +cat << 'EOF' > intermediate.ext +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +basicConstraints=critical, CA:TRUE, 
pathlen:0 +keyUsage = critical, digitalSignature, keyCertSign, cRLSign +EOF + +cat << 'EOF' > client.ext +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid,issuer +basicConstraints=CA:FALSE +keyUsage=critical, digitalSignature, nonRepudiation, keyEncipherment +extendedKeyUsage=clientAuth, emailProtection +EOF + + +# Generate server's key and cert +openssl ecparam -name prime256v1 -genkey -noout -out server.key +openssl req -x509 -new -nodes -key server.key -sha256 -days 3650 -out server.crt -subj "/C=DE/ST=Berlin/L=Berlin/O=Test SERVER CA/OU=ServerCA/CN=localhost" + +# Generate client chain +## Root CA +openssl ecparam -name prime256v1 -genkey -noout -out ca.key +openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 -out ca.crt -subj "/C=DE/ST=Berlin/L=Berlin/O=Test ROOT CA/OU=ROOT CA/CN=localhost" + +## Intermediate CA +openssl ecparam -name prime256v1 -genkey -noout -out intermediate.key +openssl req -new -nodes -key intermediate.key -out intermediate.csr -subj "/C=DE/ST=Berlin/L=Berlin/O=Test Intermediate CA/OU=Intermediate CA/CN=localhost" +openssl x509 -req -CA ca.crt -CAkey ca.key -CAcreateserial -extfile intermediate.ext -sha256 -days +3650 -in intermediate.csr -out intermediate.crt + +## Client CA +openssl ecparam -name prime256v1 -genkey -noout -out client.key +openssl req -new -nodes -key client.key -out client.csr -subj "/C=DE/ST=Berlin/L=Berlin/O=Test Client/OU=client-namespace/CN=localhost" +openssl x509 -req -CA intermediate.crt -CAkey intermediate.key -CAcreateserial -extfile client.ext -sha256 -days +3650 -in client.csr -out client.crt + +## P12 Archive +openssl pkcs12 -export -out client_good.p12 -inkey client.key -in client.crt -certfile intermediate.crt -password pass: -nodes + + +# Generate rogue chain +## Root CA +openssl ecparam -name prime256v1 -genkey -noout -out rogue_ca.key +openssl req -x509 -new -nodes -key rogue_ca.key -sha256 -days 3650 -out rogue_ca.crt -subj "/C=DE/ST=Berlin/L=Berlin/O=Test ROOT CA/OU=ROOT 
CA/CN=localhost" + +## Intermediate CA +openssl ecparam -name prime256v1 -genkey -noout -out rogue_intermediate.key +openssl req -new -nodes -key rogue_intermediate.key -out rogue_intermediate.csr -subj "/C=DE/ST=Berlin/L=Berlin/O=Test Intermediate CA/OU=Intermediate CA/CN=localhost" +openssl x509 -req -CA rogue_ca.crt -CAkey rogue_ca.key -CAcreateserial -extfile intermediate.ext -sha256 -days +3650 -in rogue_intermediate.csr -out rogue_intermediate.crt + +## Client CA +openssl ecparam -name prime256v1 -genkey -noout -out rogue_client.key +openssl req -new -nodes -key rogue_client.key -out rogue_client.csr -subj "/C=DE/ST=Berlin/L=Berlin/O=Test Client/OU=client-namespace/CN=localhost" +openssl x509 -req -CA rogue_intermediate.crt -CAkey rogue_intermediate.key -CAcreateserial -extfile client.ext -sha256 -days +3650 -in rogue_client.csr -out rogue_client.crt + +## P12 Archive +openssl pkcs12 -export -out client_bad.p12 -inkey rogue_client.key -in rogue_client.crt -certfile rogue_intermediate.crt -password pass: -nodes + +cat << 'EOF' > treehub.json +{ + "ostree": { + "server": "https://localhost:1443/" + } +} +EOF + +cat << 'EOF' > tufrepo.url +https://localhost:1443/ +EOF + +cp client_good.p12 client_auth.p12 +zip -j good.zip client_auth.p12 treehub.json tufrepo.url + +cp client_bad.p12 client_auth.p12 +zip -j bad.zip client_auth.p12 treehub.json tufrepo.url diff --git a/tests/sota_tools/authentication/tls_server.py b/tests/sota_tools/authentication/tls_server.py new file mode 100755 index 0000000000..175b2680f9 --- /dev/null +++ b/tests/sota_tools/authentication/tls_server.py @@ -0,0 +1,26 @@ +#!/usr/bin/python3 +from http.server import HTTPServer,SimpleHTTPRequestHandler +import argparse +import os.path +import socket +import ssl + +class ReUseHTTPServer(HTTPServer): + def server_bind(self): + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + HTTPServer.server_bind(self) + +parser = argparse.ArgumentParser() +parser.add_argument('port', type=int) 
+parser.add_argument('cert_path') +parser.add_argument('--noauth', action='store_true') +args = parser.parse_args() + +httpd = ReUseHTTPServer(('localhost', args.port), SimpleHTTPRequestHandler) +httpd.socket = ssl.wrap_socket (httpd.socket, + certfile=os.path.join(args.cert_path, 'server.crt'), + keyfile=os.path.join(args.cert_path, 'server.key'), + server_side=True, + cert_reqs = ssl.CERT_NONE if args.noauth else ssl.CERT_REQUIRED, + ca_certs = os.path.join(args.cert_path, 'ca.crt')) +httpd.serve_forever() diff --git a/tests/sota_tools/cert_generation/client_bad.p12 b/tests/sota_tools/cert_generation/client_bad.p12 deleted file mode 100644 index e9cf3f0205..0000000000 Binary files a/tests/sota_tools/cert_generation/client_bad.p12 and /dev/null differ diff --git a/tests/sota_tools/cert_generation/client_good.p12 b/tests/sota_tools/cert_generation/client_good.p12 deleted file mode 100644 index 3c7c200b30..0000000000 Binary files a/tests/sota_tools/cert_generation/client_good.p12 and /dev/null differ diff --git a/tests/sota_tools/cert_generation/generate-zips.sh b/tests/sota_tools/cert_generation/generate-zips.sh deleted file mode 100755 index 8b1829f203..0000000000 --- a/tests/sota_tools/cert_generation/generate-zips.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -eEuo pipefail - -if [ "$#" -lt 1 ]; then - echo "Usage: $0 " - exit 1 -fi - -SRC_DIR=$(dirname "$0") -DEST_DIR="$1" - -if [[ -f $DEST_DIR/good.zip && \ - $DEST_DIR/good.zip -nt $SRC_DIR/client_good.p12 && \ - $DEST_DIR/good.zip -nt $SRC_DIR/client_bad.p12 && \ - $DEST_DIR/good.zip -nt $SRC_DIR/turepo.url ]]; then - exit 0 -fi - -mkdir -p "$DEST_DIR" -trap 'rm -rf "$DEST_DIR"' ERR - -cat << 'EOF' > "$DEST_DIR/treehub.json" -{ - "ostree": { - "server": "https://localhost:1443/" - } -} -EOF - -cp "$SRC_DIR/client_good.p12" "$DEST_DIR/client_auth.p12" -zip -j "$DEST_DIR/good.zip" "$DEST_DIR/client_auth.p12" "$DEST_DIR/treehub.json" "$SRC_DIR/tufrepo.url" -rm "$DEST_DIR/client_auth.p12" - -cp 
"$SRC_DIR/client_bad.p12" "$DEST_DIR/client_auth.p12" -zip -j "$DEST_DIR/bad.zip" "$DEST_DIR/client_auth.p12" "$DEST_DIR/treehub.json" "$SRC_DIR/tufrepo.url" -rm "$DEST_DIR/client_auth.p12" diff --git a/tests/sota_tools/cert_generation/tufrepo.url b/tests/sota_tools/cert_generation/tufrepo.url deleted file mode 100644 index 71ca932132..0000000000 --- a/tests/sota_tools/cert_generation/tufrepo.url +++ /dev/null @@ -1 +0,0 @@ -https://localhost:1443/ diff --git a/tests/sota_tools/corrupt-repo/config b/tests/sota_tools/corrupt-repo/config new file mode 100644 index 0000000000..18e52e7acf --- /dev/null +++ b/tests/sota_tools/corrupt-repo/config @@ -0,0 +1,3 @@ +[core] +repo_version=1 +mode=archive-z2 diff --git a/tests/sota_tools/corrupt-repo/objects/01/42752678cbbaf2387511b20369de586d8a20f2bcd23e314de7efb011cee6bd.filez b/tests/sota_tools/corrupt-repo/objects/01/42752678cbbaf2387511b20369de586d8a20f2bcd23e314de7efb011cee6bd.filez new file mode 100644 index 0000000000..bf0eb7a034 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/01/42752678cbbaf2387511b20369de586d8a20f2bcd23e314de7efb011cee6bd.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/0f/b076ff2a293f90deeb84b22d93f34f911ff407991afab14994f09fe2b555af.filez b/tests/sota_tools/corrupt-repo/objects/0f/b076ff2a293f90deeb84b22d93f34f911ff407991afab14994f09fe2b555af.filez new file mode 100644 index 0000000000..96f526ddc9 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/0f/b076ff2a293f90deeb84b22d93f34f911ff407991afab14994f09fe2b555af.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/19/826d5eb5bbbe1b288c31e7aa027b9ff5f6f039c8ebd9a822a4d6069e319f7b.filez b/tests/sota_tools/corrupt-repo/objects/19/826d5eb5bbbe1b288c31e7aa027b9ff5f6f039c8ebd9a822a4d6069e319f7b.filez new file mode 100644 index 0000000000..303c7c6ae8 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/19/826d5eb5bbbe1b288c31e7aa027b9ff5f6f039c8ebd9a822a4d6069e319f7b.filez differ 
diff --git a/tests/sota_tools/corrupt-repo/objects/1b/3d3d02c78ae84b1df197dea06fd57f8fd744f8e220af5dbe6550825e887f2f.filez b/tests/sota_tools/corrupt-repo/objects/1b/3d3d02c78ae84b1df197dea06fd57f8fd744f8e220af5dbe6550825e887f2f.filez new file mode 100644 index 0000000000..197233ad27 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/1b/3d3d02c78ae84b1df197dea06fd57f8fd744f8e220af5dbe6550825e887f2f.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/1e/4bcd70097ffc6fad96b72ad6cc4563523a407a5745242fa623a0b12c915fd5.filez b/tests/sota_tools/corrupt-repo/objects/1e/4bcd70097ffc6fad96b72ad6cc4563523a407a5745242fa623a0b12c915fd5.filez new file mode 100644 index 0000000000..dac03c142c Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/1e/4bcd70097ffc6fad96b72ad6cc4563523a407a5745242fa623a0b12c915fd5.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/1f/0bcc1122376bbb77fdc28a2db6ebcd7b91c86f99c4077b335877471999c13e.filez b/tests/sota_tools/corrupt-repo/objects/1f/0bcc1122376bbb77fdc28a2db6ebcd7b91c86f99c4077b335877471999c13e.filez new file mode 100644 index 0000000000..20c731b03e Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/1f/0bcc1122376bbb77fdc28a2db6ebcd7b91c86f99c4077b335877471999c13e.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/1f/428640df2d2faf5d74be1eee9198d749e8a6e02ee73c188f7cb5f61fbba7b7.filez b/tests/sota_tools/corrupt-repo/objects/1f/428640df2d2faf5d74be1eee9198d749e8a6e02ee73c188f7cb5f61fbba7b7.filez new file mode 100644 index 0000000000..46a5b29ceb Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/1f/428640df2d2faf5d74be1eee9198d749e8a6e02ee73c188f7cb5f61fbba7b7.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/2a/28dac42b76c2015ee3c41cc4183bb8b5c790fd21fa5cfa0802c6e11fd0edbe.dirmeta b/tests/sota_tools/corrupt-repo/objects/2a/28dac42b76c2015ee3c41cc4183bb8b5c790fd21fa5cfa0802c6e11fd0edbe.dirmeta new file mode 100644 index 
0000000000..0e24141384 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/2a/28dac42b76c2015ee3c41cc4183bb8b5c790fd21fa5cfa0802c6e11fd0edbe.dirmeta differ diff --git a/tests/sota_tools/corrupt-repo/objects/2b/09331f9dfbc6cee88d0c8752f16ed272711626340d1eb07ea91db763c4bd49.filez b/tests/sota_tools/corrupt-repo/objects/2b/09331f9dfbc6cee88d0c8752f16ed272711626340d1eb07ea91db763c4bd49.filez new file mode 100644 index 0000000000..fac91ed224 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/2b/09331f9dfbc6cee88d0c8752f16ed272711626340d1eb07ea91db763c4bd49.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/2c/0d1082803f3a5e460ab655d3f2b4f6a4cdc27d2af7ec50cf7eb6b4c0e9ec44.filez b/tests/sota_tools/corrupt-repo/objects/2c/0d1082803f3a5e460ab655d3f2b4f6a4cdc27d2af7ec50cf7eb6b4c0e9ec44.filez new file mode 100644 index 0000000000..a67b09e33d Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/2c/0d1082803f3a5e460ab655d3f2b4f6a4cdc27d2af7ec50cf7eb6b4c0e9ec44.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/2c/ac7dc6ae681da93348750014007bd544897590f1d891236fc9c079bd3d6c56.filez b/tests/sota_tools/corrupt-repo/objects/2c/ac7dc6ae681da93348750014007bd544897590f1d891236fc9c079bd3d6c56.filez new file mode 100644 index 0000000000..3d85b5cb5f Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/2c/ac7dc6ae681da93348750014007bd544897590f1d891236fc9c079bd3d6c56.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/2e/e758031340b51db1c0229bddd8f64bca4b131728d2bfb20c0c8671b1259a38.filez b/tests/sota_tools/corrupt-repo/objects/2e/e758031340b51db1c0229bddd8f64bca4b131728d2bfb20c0c8671b1259a38.filez new file mode 100644 index 0000000000..e505c9e68a Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/2e/e758031340b51db1c0229bddd8f64bca4b131728d2bfb20c0c8671b1259a38.filez differ diff --git 
a/tests/sota_tools/corrupt-repo/objects/37/a4aedf99dbbc3a9b41f984b0f8f749b79d58253c080885c753fbec162b56f2.filez b/tests/sota_tools/corrupt-repo/objects/37/a4aedf99dbbc3a9b41f984b0f8f749b79d58253c080885c753fbec162b56f2.filez new file mode 100644 index 0000000000..e36cbfee0c Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/37/a4aedf99dbbc3a9b41f984b0f8f749b79d58253c080885c753fbec162b56f2.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/38/cd77a5fc304d8262823861fd9e1fdbadf2c10e1584f355abd829b1dac8fc1a.filez b/tests/sota_tools/corrupt-repo/objects/38/cd77a5fc304d8262823861fd9e1fdbadf2c10e1584f355abd829b1dac8fc1a.filez new file mode 100644 index 0000000000..b58c02cb93 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/38/cd77a5fc304d8262823861fd9e1fdbadf2c10e1584f355abd829b1dac8fc1a.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/3f/f3885d085e0db2ba72fc04903a19643b61dc6e4605f9ebc01ac68f5d80ae8f.filez b/tests/sota_tools/corrupt-repo/objects/3f/f3885d085e0db2ba72fc04903a19643b61dc6e4605f9ebc01ac68f5d80ae8f.filez new file mode 100644 index 0000000000..6bd5bf1082 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/3f/f3885d085e0db2ba72fc04903a19643b61dc6e4605f9ebc01ac68f5d80ae8f.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/41/45b1a9bade30efb28ff921f7a555ff82ba7d3b7b83b968084436167912fa83.filez b/tests/sota_tools/corrupt-repo/objects/41/45b1a9bade30efb28ff921f7a555ff82ba7d3b7b83b968084436167912fa83.filez new file mode 100644 index 0000000000..4f63f87224 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/41/45b1a9bade30efb28ff921f7a555ff82ba7d3b7b83b968084436167912fa83.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/41/f b/tests/sota_tools/corrupt-repo/objects/41/f new file mode 100644 index 0000000000..31c441ad86 --- /dev/null +++ b/tests/sota_tools/corrupt-repo/objects/41/f @@ -0,0 +1,3 @@ +00000000: 0000 001a 0000 0000 0000 0000 0000 000a 
................ +00000010: 0000 03e8 0000 03e8 0000 81b4 0000 0000 ................ +00000020: 0019 f3b8 c675 77e9 86a2 7be9 ffff .....uw...{... diff --git a/tests/sota_tools/corrupt-repo/objects/45/619fe329a170dac790076173a006f943a0c1389ce6b2388553a916b13d6f11.filez b/tests/sota_tools/corrupt-repo/objects/45/619fe329a170dac790076173a006f943a0c1389ce6b2388553a916b13d6f11.filez new file mode 100644 index 0000000000..4eb1c51a94 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/45/619fe329a170dac790076173a006f943a0c1389ce6b2388553a916b13d6f11.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/45/8c35ff8681d59c725d872e45c4625da7713ac66b88003620a3ee6759c0979c.filez b/tests/sota_tools/corrupt-repo/objects/45/8c35ff8681d59c725d872e45c4625da7713ac66b88003620a3ee6759c0979c.filez new file mode 100644 index 0000000000..35365dcf16 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/45/8c35ff8681d59c725d872e45c4625da7713ac66b88003620a3ee6759c0979c.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/47/a23ec7842ab8271f5e9e26b74daeffcbbca4d6aebca1370199d5fc25283b10.filez b/tests/sota_tools/corrupt-repo/objects/47/a23ec7842ab8271f5e9e26b74daeffcbbca4d6aebca1370199d5fc25283b10.filez new file mode 100644 index 0000000000..35a95ea88b Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/47/a23ec7842ab8271f5e9e26b74daeffcbbca4d6aebca1370199d5fc25283b10.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/4b/9b8f3f925e2e0100f72f1794ffc9a2a88fbfd252044db975b171fb353ffaf6.filez b/tests/sota_tools/corrupt-repo/objects/4b/9b8f3f925e2e0100f72f1794ffc9a2a88fbfd252044db975b171fb353ffaf6.filez new file mode 100644 index 0000000000..38c3c27435 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/4b/9b8f3f925e2e0100f72f1794ffc9a2a88fbfd252044db975b171fb353ffaf6.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/4e/ad9440afa4766b9c2c0d7435caa16d9a7f25dbce0f65964f9cb615b5e80788.filez 
b/tests/sota_tools/corrupt-repo/objects/4e/ad9440afa4766b9c2c0d7435caa16d9a7f25dbce0f65964f9cb615b5e80788.filez new file mode 100644 index 0000000000..423963b082 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/4e/ad9440afa4766b9c2c0d7435caa16d9a7f25dbce0f65964f9cb615b5e80788.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/50/9316f159b17e068afd8b6153081f6b1dc4bace5aa7303d0f3d0eaaeb5ae611.filez b/tests/sota_tools/corrupt-repo/objects/50/9316f159b17e068afd8b6153081f6b1dc4bace5aa7303d0f3d0eaaeb5ae611.filez new file mode 100644 index 0000000000..04d06cdf16 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/50/9316f159b17e068afd8b6153081f6b1dc4bace5aa7303d0f3d0eaaeb5ae611.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/50/a3e25feff71ce0ed49a252cd8e8d0875ff068ec519294909b4a9d891387da8.dirtree b/tests/sota_tools/corrupt-repo/objects/50/a3e25feff71ce0ed49a252cd8e8d0875ff068ec519294909b4a9d891387da8.dirtree new file mode 100644 index 0000000000..f1a8040d3c Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/50/a3e25feff71ce0ed49a252cd8e8d0875ff068ec519294909b4a9d891387da8.dirtree differ diff --git a/tests/sota_tools/corrupt-repo/objects/57/eaca653cfe98e09679d8c2dc5664fc742edba33f70f755c9d4e8f5334c606a.filez b/tests/sota_tools/corrupt-repo/objects/57/eaca653cfe98e09679d8c2dc5664fc742edba33f70f755c9d4e8f5334c606a.filez new file mode 100644 index 0000000000..9395eced85 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/57/eaca653cfe98e09679d8c2dc5664fc742edba33f70f755c9d4e8f5334c606a.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/59/16c80bc90d0e4d90a070321a59823cb87dfbb69c428798798c8b7746eabe83.filez b/tests/sota_tools/corrupt-repo/objects/59/16c80bc90d0e4d90a070321a59823cb87dfbb69c428798798c8b7746eabe83.filez new file mode 100644 index 0000000000..58646855ae Binary files /dev/null and 
b/tests/sota_tools/corrupt-repo/objects/59/16c80bc90d0e4d90a070321a59823cb87dfbb69c428798798c8b7746eabe83.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/5c/c9a32bbd8ae107cbfcbdb8102e042e843820e956937f39de45dd238429c319.filez b/tests/sota_tools/corrupt-repo/objects/5c/c9a32bbd8ae107cbfcbdb8102e042e843820e956937f39de45dd238429c319.filez new file mode 100644 index 0000000000..7fe25b26f9 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/5c/c9a32bbd8ae107cbfcbdb8102e042e843820e956937f39de45dd238429c319.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/5e/fba1984be23af2911748e25b045fc983d19e84e8eff92eca2b1d02dabacdfe.filez b/tests/sota_tools/corrupt-repo/objects/5e/fba1984be23af2911748e25b045fc983d19e84e8eff92eca2b1d02dabacdfe.filez new file mode 100644 index 0000000000..7338dd3766 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/5e/fba1984be23af2911748e25b045fc983d19e84e8eff92eca2b1d02dabacdfe.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/65/4b35cee48bbbb2a4dc874143205f2adf2f09b466ca6bb0ef44efa6c3166e80.filez b/tests/sota_tools/corrupt-repo/objects/65/4b35cee48bbbb2a4dc874143205f2adf2f09b466ca6bb0ef44efa6c3166e80.filez new file mode 100644 index 0000000000..a7836bd833 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/65/4b35cee48bbbb2a4dc874143205f2adf2f09b466ca6bb0ef44efa6c3166e80.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/69/9d0bf1974fc2c1d720e568da8045f2d92b47cf6abec6d935b4f3a876627e92.filez b/tests/sota_tools/corrupt-repo/objects/69/9d0bf1974fc2c1d720e568da8045f2d92b47cf6abec6d935b4f3a876627e92.filez new file mode 100644 index 0000000000..2b91930497 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/69/9d0bf1974fc2c1d720e568da8045f2d92b47cf6abec6d935b4f3a876627e92.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/6b/aa2402bcd6f2aa1181256fa0f4717e28439e9668c361ca9290f5247b41a925.filez 
b/tests/sota_tools/corrupt-repo/objects/6b/aa2402bcd6f2aa1181256fa0f4717e28439e9668c361ca9290f5247b41a925.filez new file mode 100644 index 0000000000..5d0af497cf Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/6b/aa2402bcd6f2aa1181256fa0f4717e28439e9668c361ca9290f5247b41a925.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/6f/cb7c306ed8317996c460c1811c19562bfbec099666c716a6aacfe8adaf6e03.filez b/tests/sota_tools/corrupt-repo/objects/6f/cb7c306ed8317996c460c1811c19562bfbec099666c716a6aacfe8adaf6e03.filez new file mode 100644 index 0000000000..2a0f406d2d Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/6f/cb7c306ed8317996c460c1811c19562bfbec099666c716a6aacfe8adaf6e03.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/71/ba15515d7ffcd2c66f612501e5e4f81d8e14f98683f8b8f388ea232112a543.filez b/tests/sota_tools/corrupt-repo/objects/71/ba15515d7ffcd2c66f612501e5e4f81d8e14f98683f8b8f388ea232112a543.filez new file mode 100644 index 0000000000..60e4029b71 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/71/ba15515d7ffcd2c66f612501e5e4f81d8e14f98683f8b8f388ea232112a543.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/74/388f06baf40251b14a4a5aa6f218e55b31839f77a632f1d991f8aff0295646.filez b/tests/sota_tools/corrupt-repo/objects/74/388f06baf40251b14a4a5aa6f218e55b31839f77a632f1d991f8aff0295646.filez new file mode 100644 index 0000000000..3d18caffa8 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/74/388f06baf40251b14a4a5aa6f218e55b31839f77a632f1d991f8aff0295646.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/77/60a25c9cf5d789b2c5baf1ac9565a2c149c40d386ba2647b5d2cd6295bd5c2.filez b/tests/sota_tools/corrupt-repo/objects/77/60a25c9cf5d789b2c5baf1ac9565a2c149c40d386ba2647b5d2cd6295bd5c2.filez new file mode 100644 index 0000000000..038ad0fe29 Binary files /dev/null and 
b/tests/sota_tools/corrupt-repo/objects/77/60a25c9cf5d789b2c5baf1ac9565a2c149c40d386ba2647b5d2cd6295bd5c2.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/77/70c2bc4ed07b1c3c722f2bc4469cac206c6005fc5274309081ef1ba8e8e539.filez b/tests/sota_tools/corrupt-repo/objects/77/70c2bc4ed07b1c3c722f2bc4469cac206c6005fc5274309081ef1ba8e8e539.filez new file mode 100644 index 0000000000..a3565c55f5 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/77/70c2bc4ed07b1c3c722f2bc4469cac206c6005fc5274309081ef1ba8e8e539.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/77/850609e851a60c35993a7bb1eb4758412a6cc8a5216f0787642d638e47a654.filez b/tests/sota_tools/corrupt-repo/objects/77/850609e851a60c35993a7bb1eb4758412a6cc8a5216f0787642d638e47a654.filez new file mode 100644 index 0000000000..4179b14024 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/77/850609e851a60c35993a7bb1eb4758412a6cc8a5216f0787642d638e47a654.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/78/0fbf118b12921b1604923252224c961b09bca3a6453d31bb0e5c27356a3712.filez b/tests/sota_tools/corrupt-repo/objects/78/0fbf118b12921b1604923252224c961b09bca3a6453d31bb0e5c27356a3712.filez new file mode 100644 index 0000000000..53d056cb03 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/78/0fbf118b12921b1604923252224c961b09bca3a6453d31bb0e5c27356a3712.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/78/45003ba86ded48f471ee0c97bc621703aeaa2eaeda8db1f431f7b38ee8a436.dirtree b/tests/sota_tools/corrupt-repo/objects/78/45003ba86ded48f471ee0c97bc621703aeaa2eaeda8db1f431f7b38ee8a436.dirtree new file mode 100644 index 0000000000..bdcabbb083 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/78/45003ba86ded48f471ee0c97bc621703aeaa2eaeda8db1f431f7b38ee8a436.dirtree differ diff --git a/tests/sota_tools/corrupt-repo/objects/82/95ee39ce26d7bee6c43f765f0a6da08c2d9bac41cfeaef8a05469055604e43.filez 
b/tests/sota_tools/corrupt-repo/objects/82/95ee39ce26d7bee6c43f765f0a6da08c2d9bac41cfeaef8a05469055604e43.filez new file mode 100644 index 0000000000..ca61ce84c9 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/82/95ee39ce26d7bee6c43f765f0a6da08c2d9bac41cfeaef8a05469055604e43.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/83/a0a6e6160fa5751f57fe129af6d23c77fdc997945552b0d892428df6743097.filez b/tests/sota_tools/corrupt-repo/objects/83/a0a6e6160fa5751f57fe129af6d23c77fdc997945552b0d892428df6743097.filez new file mode 100644 index 0000000000..f8db2d75e7 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/83/a0a6e6160fa5751f57fe129af6d23c77fdc997945552b0d892428df6743097.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/86/3de625f305413dc3be306afab7c3f39d8713045cfff812b3af83f9722851f0.commit b/tests/sota_tools/corrupt-repo/objects/86/3de625f305413dc3be306afab7c3f39d8713045cfff812b3af83f9722851f0.commit new file mode 100644 index 0000000000..7757d731c3 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/86/3de625f305413dc3be306afab7c3f39d8713045cfff812b3af83f9722851f0.commit differ diff --git a/tests/sota_tools/corrupt-repo/objects/87/1b02ca9b90eb8e0b257f5d7e900b89801ce93e0903a1f655eeaa9d4094f053.dirtree b/tests/sota_tools/corrupt-repo/objects/87/1b02ca9b90eb8e0b257f5d7e900b89801ce93e0903a1f655eeaa9d4094f053.dirtree new file mode 100644 index 0000000000..b29b6972a0 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/87/1b02ca9b90eb8e0b257f5d7e900b89801ce93e0903a1f655eeaa9d4094f053.dirtree differ diff --git a/tests/sota_tools/corrupt-repo/objects/87/4de7d094449a4b6c0c11a2d957741318e44130a59a1251c81781759b685f45.filez b/tests/sota_tools/corrupt-repo/objects/87/4de7d094449a4b6c0c11a2d957741318e44130a59a1251c81781759b685f45.filez new file mode 100644 index 0000000000..c9417518b8 Binary files /dev/null and 
b/tests/sota_tools/corrupt-repo/objects/87/4de7d094449a4b6c0c11a2d957741318e44130a59a1251c81781759b685f45.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/89/39d12474c34016168b1f5df2026f13b99b5f34eacade61959d0f99a1e68c64.filez b/tests/sota_tools/corrupt-repo/objects/89/39d12474c34016168b1f5df2026f13b99b5f34eacade61959d0f99a1e68c64.filez new file mode 100644 index 0000000000..03fb58135a Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/89/39d12474c34016168b1f5df2026f13b99b5f34eacade61959d0f99a1e68c64.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/8b/70597e41d81f0b37319f862119f2e284694ea5a08e52c8dd8dbf3d91e6b8e2.filez b/tests/sota_tools/corrupt-repo/objects/8b/70597e41d81f0b37319f862119f2e284694ea5a08e52c8dd8dbf3d91e6b8e2.filez new file mode 100644 index 0000000000..d5e8e63bfe Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/8b/70597e41d81f0b37319f862119f2e284694ea5a08e52c8dd8dbf3d91e6b8e2.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/8d/5ef742c7adee56a024c50a281288397fc44e180d8f5035a4731390032d6ecd.filez b/tests/sota_tools/corrupt-repo/objects/8d/5ef742c7adee56a024c50a281288397fc44e180d8f5035a4731390032d6ecd.filez new file mode 100644 index 0000000000..b856495f92 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/8d/5ef742c7adee56a024c50a281288397fc44e180d8f5035a4731390032d6ecd.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/96/fd0317603e0993e8854b08f985c0ed80fc579a04d432abe3f8faf82c654217.filez b/tests/sota_tools/corrupt-repo/objects/96/fd0317603e0993e8854b08f985c0ed80fc579a04d432abe3f8faf82c654217.filez new file mode 100644 index 0000000000..27c8d86125 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/96/fd0317603e0993e8854b08f985c0ed80fc579a04d432abe3f8faf82c654217.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/98/fc711fc44f70732d7b64d5072881cb6e1428c43e08a993ccc8867d4b4f7038.filez 
b/tests/sota_tools/corrupt-repo/objects/98/fc711fc44f70732d7b64d5072881cb6e1428c43e08a993ccc8867d4b4f7038.filez new file mode 100644 index 0000000000..18a497bebe Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/98/fc711fc44f70732d7b64d5072881cb6e1428c43e08a993ccc8867d4b4f7038.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/99/74c198c880b42fac2bd1440f37fae6645a153eb898de4f0994eccd38c2687f.filez b/tests/sota_tools/corrupt-repo/objects/99/74c198c880b42fac2bd1440f37fae6645a153eb898de4f0994eccd38c2687f.filez new file mode 100644 index 0000000000..c7f2fc28b7 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/99/74c198c880b42fac2bd1440f37fae6645a153eb898de4f0994eccd38c2687f.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/9a/71ee4b402b5b0611f8c549f6e7d605c9644b81f7a978991f2a4d408c19c363.filez b/tests/sota_tools/corrupt-repo/objects/9a/71ee4b402b5b0611f8c549f6e7d605c9644b81f7a978991f2a4d408c19c363.filez new file mode 100644 index 0000000000..2ccb919f39 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/9a/71ee4b402b5b0611f8c549f6e7d605c9644b81f7a978991f2a4d408c19c363.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/9f/ab684521701c5c074ffb1687987c104551aa69c04c8a202fa3917233e7c59f.filez b/tests/sota_tools/corrupt-repo/objects/9f/ab684521701c5c074ffb1687987c104551aa69c04c8a202fa3917233e7c59f.filez new file mode 100644 index 0000000000..d484d45f40 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/9f/ab684521701c5c074ffb1687987c104551aa69c04c8a202fa3917233e7c59f.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/a2/cebaad0e2b982b7ce423fdbc34209280282668809951bec194ff42ef19694a.filez b/tests/sota_tools/corrupt-repo/objects/a2/cebaad0e2b982b7ce423fdbc34209280282668809951bec194ff42ef19694a.filez new file mode 100644 index 0000000000..f16acdd726 Binary files /dev/null and 
b/tests/sota_tools/corrupt-repo/objects/a2/cebaad0e2b982b7ce423fdbc34209280282668809951bec194ff42ef19694a.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/a3/e103c74629edf196c19593dfdfab33bf6cde5ccdaa7d66bd18d39214b0cf9b.filez b/tests/sota_tools/corrupt-repo/objects/a3/e103c74629edf196c19593dfdfab33bf6cde5ccdaa7d66bd18d39214b0cf9b.filez new file mode 100644 index 0000000000..287598f2e2 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/a3/e103c74629edf196c19593dfdfab33bf6cde5ccdaa7d66bd18d39214b0cf9b.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/a6/5c26ea98475894c05c0753f7a5b232e50cb0fd8c67013663725c24b5133b60.filez b/tests/sota_tools/corrupt-repo/objects/a6/5c26ea98475894c05c0753f7a5b232e50cb0fd8c67013663725c24b5133b60.filez new file mode 100644 index 0000000000..73938edac1 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/a6/5c26ea98475894c05c0753f7a5b232e50cb0fd8c67013663725c24b5133b60.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/a8/5e75add6cf2b9205187dc4c3795be57f9e1c441bf577bb90552cb40ce6b180.filez b/tests/sota_tools/corrupt-repo/objects/a8/5e75add6cf2b9205187dc4c3795be57f9e1c441bf577bb90552cb40ce6b180.filez new file mode 100644 index 0000000000..2f4d2dab0e Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/a8/5e75add6cf2b9205187dc4c3795be57f9e1c441bf577bb90552cb40ce6b180.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/b7/f9b3b5e888156b3864ff202e85727dec9340451951e60abc068d1d20e707a4.filez b/tests/sota_tools/corrupt-repo/objects/b7/f9b3b5e888156b3864ff202e85727dec9340451951e60abc068d1d20e707a4.filez new file mode 100644 index 0000000000..c9883fad09 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/b7/f9b3b5e888156b3864ff202e85727dec9340451951e60abc068d1d20e707a4.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/bc/aa0f4974fd75384d6483d0d3e54f2b4383a3c38de5487305c81d9504345589.filez 
b/tests/sota_tools/corrupt-repo/objects/bc/aa0f4974fd75384d6483d0d3e54f2b4383a3c38de5487305c81d9504345589.filez new file mode 100644 index 0000000000..d556c7fb02 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/bc/aa0f4974fd75384d6483d0d3e54f2b4383a3c38de5487305c81d9504345589.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/c0/007610f4c0d4bf8464cf9408cc0770345a9ea519beb7268281ad08b8efb004.dirtree b/tests/sota_tools/corrupt-repo/objects/c0/007610f4c0d4bf8464cf9408cc0770345a9ea519beb7268281ad08b8efb004.dirtree new file mode 100644 index 0000000000..4402f8d0a1 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/c0/007610f4c0d4bf8464cf9408cc0770345a9ea519beb7268281ad08b8efb004.dirtree differ diff --git a/tests/sota_tools/corrupt-repo/objects/c0/7690b56017ccfbf29413e330de997455bac470d6752e7e432020bb99fb9296.filez b/tests/sota_tools/corrupt-repo/objects/c0/7690b56017ccfbf29413e330de997455bac470d6752e7e432020bb99fb9296.filez new file mode 100644 index 0000000000..5ff7f9c497 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/c0/7690b56017ccfbf29413e330de997455bac470d6752e7e432020bb99fb9296.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/cd/01fa8a2bcd5c7ea648f1bd3e3a41cd90b7e3b47eb64e301c08d1de0e324174.filez b/tests/sota_tools/corrupt-repo/objects/cd/01fa8a2bcd5c7ea648f1bd3e3a41cd90b7e3b47eb64e301c08d1de0e324174.filez new file mode 100644 index 0000000000..62c45fc6d3 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/cd/01fa8a2bcd5c7ea648f1bd3e3a41cd90b7e3b47eb64e301c08d1de0e324174.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/d6/6a3a02a1bb7b12546c9996f0d095faf2116260a0ffb9e9d8e35d8fb347f20f.filez b/tests/sota_tools/corrupt-repo/objects/d6/6a3a02a1bb7b12546c9996f0d095faf2116260a0ffb9e9d8e35d8fb347f20f.filez new file mode 100644 index 0000000000..3cc2a8ebfb Binary files /dev/null and 
b/tests/sota_tools/corrupt-repo/objects/d6/6a3a02a1bb7b12546c9996f0d095faf2116260a0ffb9e9d8e35d8fb347f20f.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/d7/099beea4aa6e03dc05b4b186a718ddd729a4b33b6b079ef1144934f0b84c39.filez b/tests/sota_tools/corrupt-repo/objects/d7/099beea4aa6e03dc05b4b186a718ddd729a4b33b6b079ef1144934f0b84c39.filez new file mode 100644 index 0000000000..c18e87f7bc Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/d7/099beea4aa6e03dc05b4b186a718ddd729a4b33b6b079ef1144934f0b84c39.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/ed/b0c4362e4ba2fb5b29a7ff34c85b8bebba3fdc4eaba83d3fd57a23cfa350f7.filez b/tests/sota_tools/corrupt-repo/objects/ed/b0c4362e4ba2fb5b29a7ff34c85b8bebba3fdc4eaba83d3fd57a23cfa350f7.filez new file mode 100644 index 0000000000..005e141f5b Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/ed/b0c4362e4ba2fb5b29a7ff34c85b8bebba3fdc4eaba83d3fd57a23cfa350f7.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/ef/64d2dea369c32294f587e523d873a99e89da085d700d658301b277ee58ba02.filez b/tests/sota_tools/corrupt-repo/objects/ef/64d2dea369c32294f587e523d873a99e89da085d700d658301b277ee58ba02.filez new file mode 100644 index 0000000000..5ef9d1f016 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/ef/64d2dea369c32294f587e523d873a99e89da085d700d658301b277ee58ba02.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/f2/4f48fe43f9cb716e2bcd58582bb51e0ae3a41570fe5837e9e9572beb14deb6.filez b/tests/sota_tools/corrupt-repo/objects/f2/4f48fe43f9cb716e2bcd58582bb51e0ae3a41570fe5837e9e9572beb14deb6.filez new file mode 100644 index 0000000000..08819d94d7 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/f2/4f48fe43f9cb716e2bcd58582bb51e0ae3a41570fe5837e9e9572beb14deb6.filez differ diff --git a/tests/sota_tools/corrupt-repo/objects/f7/b6b289bd1479c8c74418978b5de4851d31304e3dcef26e4242112bfeb141c7.filez 
b/tests/sota_tools/corrupt-repo/objects/f7/b6b289bd1479c8c74418978b5de4851d31304e3dcef26e4242112bfeb141c7.filez new file mode 100644 index 0000000000..dd46366cb3 Binary files /dev/null and b/tests/sota_tools/corrupt-repo/objects/f7/b6b289bd1479c8c74418978b5de4851d31304e3dcef26e4242112bfeb141c7.filez differ diff --git a/tests/sota_tools/corrupt-repo/refs/heads/master b/tests/sota_tools/corrupt-repo/refs/heads/master new file mode 100644 index 0000000000..760f1a1ce2 --- /dev/null +++ b/tests/sota_tools/corrupt-repo/refs/heads/master @@ -0,0 +1 @@ +863de625f305413dc3be306afab7c3f39d8713045cfff812b3af83f9722851f0 diff --git a/tests/sota_tools/repo/refs/heads/master b/tests/sota_tools/repo/refs/heads/master index 502d7d0ac8..d622b5b52b 100644 --- a/tests/sota_tools/repo/refs/heads/master +++ b/tests/sota_tools/repo/refs/heads/master @@ -1 +1 @@ -16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe +16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe \ No newline at end of file diff --git a/tests/sota_tools/test-auth-plus-failure b/tests/sota_tools/test-auth-plus-failure.sh similarity index 70% rename from tests/sota_tools/test-auth-plus-failure rename to tests/sota_tools/test-auth-plus-failure.sh index c1e777f706..969056108f 100755 --- a/tests/sota_tools/test-auth-plus-failure +++ b/tests/sota_tools/test-auth-plus-failure.sh @@ -2,8 +2,8 @@ set -uo pipefail CREDS_FILE=$(mktemp) -trap "rm -f $CREDS_FILE" EXIT -cat > $CREDS_FILE < "$CREDS_FILE" < $CREDS_FILE <&1 | grep -q ca-certificates -test $? -eq 1 diff --git a/tests/sota_tools/test-cacert-used.sh b/tests/sota_tools/test-cacert-used.sh new file mode 100755 index 0000000000..4475e94c85 --- /dev/null +++ b/tests/sota_tools/test-cacert-used.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -u + +$3 "$1" --ref master --repo sota_tools/repo --credentials "$2" --cacert sota_tools/testcerts.crt -n 2>&1 | grep -q ca-certificates +test $? 
-eq 1 diff --git a/tests/sota_tools/test-dry-run b/tests/sota_tools/test-dry-run.sh similarity index 100% rename from tests/sota_tools/test-dry-run rename to tests/sota_tools/test-dry-run.sh diff --git a/tests/sota_tools/test-garage-deploy-dry-run b/tests/sota_tools/test-garage-deploy-dry-run deleted file mode 100755 index f24e4e090c..0000000000 --- a/tests/sota_tools/test-garage-deploy-dry-run +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -euo pipefail -trap 'kill %1' EXIT - -TEMP_DIR=$(mktemp -d) - -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') -TREEHUB="{\ - \"ostree\": {\ - \"server\": \"http://localhost:$PORT/\"\ - }\ -}" - -echo $TREEHUB > $TEMP_DIR/treehub.json -./tests/sota_tools/treehub_server.py -p${PORT} -c & -until curl -I localhost:${PORT} 2>/dev/null; do sleep 0.5; done - -$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f $TEMP_DIR/treehub.json -p $TEMP_DIR/treehub.json --name testname -h hwids -n -exit_code=$? -exit $exit_code \ No newline at end of file diff --git a/tests/sota_tools/test-garage-deploy-dry-run.sh b/tests/sota_tools/test-garage-deploy-dry-run.sh new file mode 100755 index 0000000000..7945031423 --- /dev/null +++ b/tests/sota_tools/test-garage-deploy-dry-run.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -euo pipefail +trap 'kill %1' EXIT + +TEMP_DIR=$(mktemp -d) + +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +TREEHUB="{\ + \"ostree\": {\ + \"server\": \"http://localhost:$PORT/\"\ + }\ +}" + +echo "$TREEHUB" > "$TEMP_DIR"/treehub.json +./tests/sota_tools/treehub_server.py -p"${PORT}" -c & +until curl -I localhost:"${PORT}" 2>/dev/null; do sleep 0.5; done + +$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f "$TEMP_DIR"/treehub.json -p "$TEMP_DIR"/treehub.json --name testname -h hwids -n +exit_code=$? 
+exit $exit_code \ No newline at end of file diff --git a/tests/sota_tools/test-garage-deploy-missing-push-credentials b/tests/sota_tools/test-garage-deploy-missing-fetch-credentials.sh similarity index 63% rename from tests/sota_tools/test-garage-deploy-missing-push-credentials rename to tests/sota_tools/test-garage-deploy-missing-fetch-credentials.sh index 5ce3832424..7cdc384dd2 100755 --- a/tests/sota_tools/test-garage-deploy-missing-push-credentials +++ b/tests/sota_tools/test-garage-deploy-missing-fetch-credentials.sh @@ -2,4 +2,4 @@ set -eu TARGET="Unable to read a-non-existent-file as archive or json file" # File not found and invalid/corrupt files are treated the same -$1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f ./tests/sota_tools/auth_test_good.zip -p a-non-existent-file --name testname -h hwids 2>&1 | grep "$TARGET" +$1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f a-non-existent-file -p "$2" --name testname -h hwids 2>&1 | grep "$TARGET" diff --git a/tests/sota_tools/test-garage-deploy-missing-fetch-credentials b/tests/sota_tools/test-garage-deploy-missing-push-credentials.sh similarity index 63% rename from tests/sota_tools/test-garage-deploy-missing-fetch-credentials rename to tests/sota_tools/test-garage-deploy-missing-push-credentials.sh index d093886574..c1776f1d7b 100755 --- a/tests/sota_tools/test-garage-deploy-missing-fetch-credentials +++ b/tests/sota_tools/test-garage-deploy-missing-push-credentials.sh @@ -2,4 +2,4 @@ set -eu TARGET="Unable to read a-non-existent-file as archive or json file" # File not found and invalid/corrupt files are treated the same -$1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f a-non-existent-file -p ./tests/sota_tools/auth_test_good.zip --name testname -h hwids 2>&1 | grep "$TARGET" +$1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f "$2" -p a-non-existent-file --name testname -h hwids 2>&1 | 
grep "$TARGET" diff --git a/tests/sota_tools/test-garage-deploy-offline-signing b/tests/sota_tools/test-garage-deploy-offline-signing.sh similarity index 78% rename from tests/sota_tools/test-garage-deploy-offline-signing rename to tests/sota_tools/test-garage-deploy-offline-signing.sh index 9eda0e0c3c..73c8c9d547 100755 --- a/tests/sota_tools/test-garage-deploy-offline-signing +++ b/tests/sota_tools/test-garage-deploy-offline-signing.sh @@ -4,7 +4,7 @@ trap 'kill %1' EXIT TEMP_DIR=$(mktemp -d) -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') TREEHUB="{\ \"ostree\": {\ \"server\": \"http://localhost:$PORT/\"\ @@ -17,8 +17,8 @@ chmod +x "$TEMP_DIR/garage-sign" export PATH=$PATH:$TEMP_DIR echo "$TREEHUB" > "$TEMP_DIR/treehub.json" -./tests/sota_tools/treehub_server.py -p${PORT} -c & -until curl -I localhost:${PORT} 2>/dev/null; do sleep 0.5; done +./tests/sota_tools/treehub_server.py -p"${PORT}" -c & +until curl -I localhost:"${PORT}" 2>/dev/null; do sleep 0.5; done cd "$TEMP_DIR" # Currently, if credentials do not support offline signing, this will fail. 
If diff --git a/tests/sota_tools/test-garage-deploy-online-signing b/tests/sota_tools/test-garage-deploy-online-signing deleted file mode 100755 index 73bad61490..0000000000 --- a/tests/sota_tools/test-garage-deploy-online-signing +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -euo pipefail -trap 'kill %1' EXIT - -TEMP_DIR=$(mktemp -d) - -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') -TREEHUB="{\ - \"ostree\": {\ - \"server\": \"http://localhost:$PORT/\"\ - }\ -}" - -echo $TREEHUB > $TEMP_DIR/treehub.json -./tests/sota_tools/treehub_server.py -p${PORT} -c & -until curl -I localhost:${PORT} 2>/dev/null; do sleep 0.5; done - -$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f $TEMP_DIR/treehub.json -p $TEMP_DIR/treehub.json --name testname -h hwids -exit_code=$? -exit $exit_code diff --git a/tests/sota_tools/test-garage-deploy-online-signing.sh b/tests/sota_tools/test-garage-deploy-online-signing.sh new file mode 100755 index 0000000000..cecb670f3e --- /dev/null +++ b/tests/sota_tools/test-garage-deploy-online-signing.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -euo pipefail +trap 'kill %1' EXIT + +TEMP_DIR=$(mktemp -d) + +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +TREEHUB="{\ + \"ostree\": {\ + \"server\": \"http://localhost:$PORT/\"\ + }\ +}" + +echo "$TREEHUB" > "$TEMP_DIR"/treehub.json +./tests/sota_tools/treehub_server.py -p"${PORT}" -c & +until curl -I localhost:"${PORT}" 2>/dev/null; do sleep 0.5; done + +$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f "$TEMP_DIR"/treehub.json -p "$TEMP_DIR"/treehub.json --name testname -h hwids +exit_code=$? 
+exit $exit_code diff --git a/tests/sota_tools/test-garage-deploy-upload-failed b/tests/sota_tools/test-garage-deploy-upload-failed deleted file mode 100755 index a3cbb86881..0000000000 --- a/tests/sota_tools/test-garage-deploy-upload-failed +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -set -euo pipefail -trap 'kill %1' EXIT - -TEMP_DIR=$(mktemp -d) - -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') -TREEHUB="{\ - \"ostree\": {\ - \"server\": \"http://localhost:$PORT/\"\ - }\ -}" - -TREEHUB_DEST="{\ - \"ostree\": {\ - \"server\": \"http://localhost:1/\"\ - }\ -}" - -echo $TREEHUB > $TEMP_DIR/treehub.json -echo $TREEHUB_DEST > $TEMP_DIR/treehub_dest.json - -./tests/sota_tools/treehub_server.py -p${PORT} -c & -until curl -I localhost:${PORT} 2>/dev/null; do sleep 0.5; done - -$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f $TEMP_DIR/treehub.json -p $TEMP_DIR/treehub_dest.json --name testname -h hwids -exit_code=$? 
-exit $exit_code diff --git a/tests/sota_tools/test-garage-deploy-upload-failed.sh b/tests/sota_tools/test-garage-deploy-upload-failed.sh new file mode 100755 index 0000000000..9de9b1f704 --- /dev/null +++ b/tests/sota_tools/test-garage-deploy-upload-failed.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -euo pipefail +trap 'kill %1' EXIT + +TEMP_DIR=$(mktemp -d) + +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +TREEHUB="{\ + \"ostree\": {\ + \"server\": \"http://localhost:$PORT/\"\ + }\ +}" + +TREEHUB_DEST="{\ + \"ostree\": {\ + \"server\": \"http://localhost:1/\"\ + }\ +}" + +echo "$TREEHUB" > "$TEMP_DIR"/treehub.json +echo "$TREEHUB_DEST" > "$TEMP_DIR"/treehub_dest.json + +./tests/sota_tools/treehub_server.py -p"${PORT}" -c & +until curl -I localhost:"${PORT}" 2>/dev/null; do sleep 0.5; done + +$1 --commit b9ac1e45f9227df8ee191b6e51e09417bd36c6ebbeff999431e3073ac50f0563 -f "$TEMP_DIR"/treehub.json -p "$TEMP_DIR"/treehub_dest.json --name testname -h hwids +exit_code=$? 
+exit $exit_code diff --git a/tests/sota_tools/test-invalid-credentials b/tests/sota_tools/test-invalid-credentials.sh similarity index 100% rename from tests/sota_tools/test-invalid-credentials rename to tests/sota_tools/test-invalid-credentials.sh diff --git a/tests/sota_tools/test-missing-commit b/tests/sota_tools/test-missing-commit.sh similarity index 50% rename from tests/sota_tools/test-missing-commit rename to tests/sota_tools/test-missing-commit.sh index ff6b8003fc..74c9d5019c 100755 --- a/tests/sota_tools/test-missing-commit +++ b/tests/sota_tools/test-missing-commit.sh @@ -4,17 +4,17 @@ trap 'kill %1' EXIT TEMP_DIR=$(mktemp -d) -PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +PORT=$(python3 -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') TREEHUB="{\ \"ostree\": {\ \"server\": \"http://localhost:$PORT/\"\ }\ }" -echo $TREEHUB > $TEMP_DIR/treehub.json -./tests/sota_tools/treehub_server.py --tls -p${PORT} -d${TEMP_DIR} & +echo "$TREEHUB" > "$TEMP_DIR"/treehub.json +./tests/sota_tools/treehub_server.py --tls -p"${PORT}" -d"${TEMP_DIR}" & sleep 1 TARGET="OSTree commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe was not found in src repository" -($1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f $TEMP_DIR/treehub.json -p $TEMP_DIR/treehub.json --name testname -h hwids || true) | grep -x "$TARGET" +($1 --commit 16ef2f2629dc9263fdf3c0f032563a2d757623bbc11cf99df25c3c3f258dccbe -f "$TEMP_DIR"/treehub.json -p "$TEMP_DIR"/treehub.json --name testname -h hwids || true) | grep -x "$TARGET" exit $? 
diff --git a/tests/sota_tools/test-missing-credentials b/tests/sota_tools/test-missing-credentials.sh similarity index 100% rename from tests/sota_tools/test-missing-credentials rename to tests/sota_tools/test-missing-credentials.sh diff --git a/tests/sota_tools/test-missing-ref b/tests/sota_tools/test-missing-ref.sh similarity index 50% rename from tests/sota_tools/test-missing-ref rename to tests/sota_tools/test-missing-ref.sh index be00990928..00c5aa2a21 100755 --- a/tests/sota_tools/test-missing-ref +++ b/tests/sota_tools/test-missing-ref.sh @@ -2,4 +2,4 @@ set -eu TARGET="Ref or commit refhash badref was not found in repository sota_tools/repo" -$1 --ref badref --repo sota_tools/repo --credentials sota_tools/auth_test_good.zip | grep -q "$TARGET" +$1 --ref badref --repo sota_tools/repo --credentials "$2" | grep -q "$TARGET" diff --git a/tests/sota_tools/test-server-500 b/tests/sota_tools/test-server-500.py similarity index 93% rename from tests/sota_tools/test-server-500 rename to tests/sota_tools/test-server-500.py index 866b1d240c..d42e024703 100755 --- a/tests/sota_tools/test-server-500 +++ b/tests/sota_tools/test-server-500.py @@ -1,9 +1,7 @@ #! /usr/bin/env python3 """ -Regression test for PRO-2902 - -The bug was caused by the logic that cleanly shuts down the request pool when +Regression test for a bug caused by the logic that shuts down the request pool when the server returns an error. This test causes 3 'query' requests to hang until an upload fails. Once the upload has failed and the server is trying to shutdown, the queries are released. 
In the broken case this caused RequestPool @@ -56,7 +54,7 @@ def main(): def handler(*args): TreehubServer(ostree_repo, *args) - httpd = ThreadingTCPServer(('127.0.0.1', 0), handler) + httpd = ThreadingTCPServer(('localhost', 0), handler) address, port = httpd.socket.getsockname() print("Serving at port", port) t = threading.Thread(target=httpd.serve_forever) diff --git a/tests/sota_tools/test-server-500_after_20 b/tests/sota_tools/test-server-500_after_20.py similarity index 95% rename from tests/sota_tools/test-server-500_after_20 rename to tests/sota_tools/test-server-500_after_20.py index 478fe09bcc..57ff4a04aa 100755 --- a/tests/sota_tools/test-server-500_after_20 +++ b/tests/sota_tools/test-server-500_after_20.py @@ -31,7 +31,7 @@ def main(): def handler(*args): TreehubServer(ostree_repo, *args) - httpd = ThreadingTCPServer(('127.0.0.1', 0), handler) + httpd = ThreadingTCPServer(('localhost', 0), handler) address, port = httpd.socket.getsockname() print("Serving at port", port) t = threading.Thread(target=httpd.serve_forever) diff --git a/tests/sota_tools/test-server-error_every_10 b/tests/sota_tools/test-server-error_every_10.py similarity index 93% rename from tests/sota_tools/test-server-error_every_10 rename to tests/sota_tools/test-server-error_every_10.py index 098483c9cb..c600b08235 100755 --- a/tests/sota_tools/test-server-error_every_10 +++ b/tests/sota_tools/test-server-error_every_10.py @@ -7,8 +7,6 @@ import sys -from time import sleep - class OstreeRepo(object): def __init__(self): @@ -33,7 +31,7 @@ def main(): def handler(*args): TreehubServer(ostree_repo, *args) - httpd = ThreadingTCPServer(('127.0.0.1', 0), handler) + httpd = ThreadingTCPServer(('localhost', 0), handler) address, port = httpd.socket.getsockname() print("Serving at port", port) t = threading.Thread(target=httpd.serve_forever) diff --git a/tests/sota_tools/test-trace-logging.sh b/tests/sota_tools/test-trace-logging.sh new file mode 100755 index 0000000000..5e793b117e --- 
/dev/null +++ b/tests/sota_tools/test-trace-logging.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -eu + +$1 --ref master --repo sota_tools/repo --credentials "$2" -n --loglevel 0 2>&1 | grep -q "Content-Type" diff --git a/tests/sota_tools/test-upload-corrupt-object.py b/tests/sota_tools/test-upload-corrupt-object.py new file mode 100755 index 0000000000..af1c5e0e56 --- /dev/null +++ b/tests/sota_tools/test-upload-corrupt-object.py @@ -0,0 +1,80 @@ +#! /usr/bin/env python3 + +from mocktreehub import TreehubServer, TemporaryCredentials +from socketserver import ThreadingTCPServer +import subprocess +import threading + +import sys + +# This object was manually corrupted by overwriting the last 2 bytes with FFFF +bad_object = '41/45b1a9bade30efb28ff921f7a555ff82ba7d3b7b83b968084436167912fa83.filez' + + +class OstreeRepo(object): + """ + Detect if garage-push uploads the known-corrupt object + """ + + def __init__(self): + self.got_bad_object = False + + def upload(self, name): + print("Uploaded", name) + if name == bad_object: + print("garage-push attempted to upload known corrupted object:%s" % name) + self.got_bad_object = True + return 204 + + def query(self, name): + return 404 + + +def main(): + ostree_repo = OstreeRepo() + + def handler(*args): + TreehubServer(ostree_repo, *args) + + httpd = ThreadingTCPServer(('localhost', 0), handler) + address, port = httpd.socket.getsockname() + print("Serving at port", port) + t = threading.Thread(target=httpd.serve_forever) + t.setDaemon(True) + t.start() + + target = sys.argv[1] + + with TemporaryCredentials(port) as creds: + # First try with integrity checks enabled (the default) + dut = subprocess.Popen(args=[target, '--credentials', creds.path(), '--ref', 'master', + '--repo', 'corrupt-repo']) + try: + exitcode = dut.wait(120) + if exitcode == 0: + print("garage-push should report an error result") + sys.exit(1) + if ostree_repo.got_bad_object: + print("Bad object was uploaded") + sys.exit(1) + except subprocess.TimeoutExpired: 
+ print("garage-push hung") + sys.exit(1) + # With --disable-integrity-checks, it should succeed + dut = subprocess.Popen(args=[target, '--credentials', creds.path(), '--ref', 'master', + '--repo', 'corrupt-repo', '--disable-integrity-checks']) + try: + exitcode = dut.wait(120) + if exitcode != 0: + print("garage-push should succeed when integrity checks are not enabled") + sys.exit(1) + if not ostree_repo.got_bad_object: + print("Bad object should have been uploaded anyway") + sys.exit(1) + except subprocess.TimeoutExpired: + print("garage-push hung") + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/tests/sota_tools/test-verbose-logging b/tests/sota_tools/test-verbose-logging deleted file mode 100755 index f5885af17b..0000000000 --- a/tests/sota_tools/test-verbose-logging +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -eu - -$1 --ref master --repo sota_tools/repo --credentials $2 -nvv 2>&1 | grep -q "Content-Type" diff --git a/tests/sota_tools/treehub_server.py b/tests/sota_tools/treehub_server.py index 24c4c97f38..03fa61c5b0 100755 --- a/tests/sota_tools/treehub_server.py +++ b/tests/sota_tools/treehub_server.py @@ -8,6 +8,7 @@ import subprocess import sys import time +import hashlib from contextlib import ExitStack from http.server import BaseHTTPRequestHandler, HTTPServer from random import seed, randrange @@ -104,9 +105,9 @@ def sig_handler(signum, frame): sys.exit(0) -def create_repo(path): +def create_repo(path, system_rootfs=False): """ - Creates a new ostree repository with persistent object checksums. + Creates a new OSTree repository with persistent object checksums. To achive persistency, we generate files with the same seed(0). As OSTree content objects objects include uid, gid, and extended attributes, we strip the extended attributes and set the rest to constant values. @@ -114,7 +115,25 @@ def create_repo(path): timestamp in commit. 
""" tree = Path(path) / 'tree' - tree.mkdir(mode=0o755) + tree.mkdir(mode=0o755, parents=True) + + if system_rootfs: + # make it look like a system rootfs, + # `ostree deploy` command checks for /boot/vmlinuz- and /usr/etc/os-release + boot_dir = os.path.join(tree, 'boot') + etc_dir = os.path.join(tree, 'usr/etc') + + os.makedirs(boot_dir, mode=0o755) + os.makedirs(etc_dir, mode=0o755) + + kernel_file_content = 'I am kernel' + kernel_file_sha = hashlib.sha256(kernel_file_content.encode('utf-8')).hexdigest() + with open(os.path.join(boot_dir, 'vmlinuz-' + kernel_file_sha), 'w') as kernel_file: + kernel_file.write(kernel_file_content) + + with open(os.path.join(etc_dir, 'os-release'), 'w') as os_release: + os_release.write('ID="dummy-os"\nNAME="Generated OSTree-enabled OS\nVERSION="4.14159"') + seed(0) # to generate same files each time try: for i in range(10): @@ -139,8 +158,10 @@ def create_repo(path): parser.add_argument('-p', '--port', type=int, required=True, help='listening port') parser.add_argument('-c', '--create', action='store_true', - help='create new ostree repo') - parser.add_argument('-d', '--dir', help='ostree repo directory') + help='create new OSTree repo') + parser.add_argument('-cs', '--system', action='store_true', + help='make it look like a system rootfs to OSTree', default=False) + parser.add_argument('-d', '--dir', help='OSTree repo directory') parser.add_argument('-f', '--fail', type=int, help='fail every nth request') parser.add_argument('-s', '--sleep', type=float, help='sleep for n.n seconds for every GET request') @@ -156,7 +177,7 @@ def create_repo(path): else: repo_path = stack.enter_context(TemporaryDirectory(prefix='treehub-')) if args.create: - create_repo(repo_path) + create_repo(repo_path, args.system) httpd = HTTPServer(('', args.port), TreehubServerHandler) if args.tls: httpd.socket = ssl.wrap_socket(httpd.socket, diff --git a/tests/test_aktualizr_kill.py b/tests/test_aktualizr_kill.py index 0d0f2b79b7..81bf410847 100755 --- 
a/tests/test_aktualizr_kill.py +++ b/tests/test_aktualizr_kill.py @@ -3,7 +3,7 @@ import argparse import logging import signal -import time +import re from os import getcwd, chdir @@ -17,32 +17,35 @@ @with_director(start=True) @with_aktualizr(start=False, log_level=0, run_mode='full') def test_aktualizr_kill(director, aktualizr, **kwargs): - test_result = False with aktualizr: try: aktualizr.wait_for_provision() aktualizr.terminate() aktualizr.wait_for_completion() - test_result = 'Aktualizr daemon exiting...' in aktualizr.output() + # Match both "Aktualizr daemon exiting..." and "Aktualizr::RunForever exiting: + output = aktualizr.output() + test_pass = re.search('Aktualizr.*exiting', output) + if not test_pass: + print("Failed to find Aktualizr exit message in:") + print(output) + return test_pass except Exception: aktualizr.terminate(sig=signal.SIGKILL) aktualizr.wait_for_completion() print(aktualizr.output()) raise - return test_result - if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + initial_cwd = getcwd() parser = argparse.ArgumentParser(description='Test aktualizr kill') - parser.add_argument('-b', '--build-dir', help='build directory', default='build') - parser.add_argument('-s', '--src-dir', help='source directory', default='.') + parser.add_argument('-b', '--build-dir', help='build directory', default=initial_cwd + '/build') + parser.add_argument('-s', '--src-dir', help='source directory', default=initial_cwd) input_params = parser.parse_args() KeyStore.base_dir = input_params.src_dir - initial_cwd = getcwd() chdir(input_params.build_dir) test_suite = [ @@ -53,7 +56,7 @@ def test_aktualizr_kill(director, aktualizr, **kwargs): for test in test_suite: logger.info('>>> Running {}...'.format(test.__name__)) test_run_result = test() - logger.info('>>> {}: {}'.format('OK' if test_run_result else 'Failed', test.__name__)) + logger.info('>>> {}: {}\n'.format('OK' if test_run_result else 'FAILED', test.__name__)) test_suite_run_result = 
test_suite_run_result and test_run_result chdir(initial_cwd) diff --git a/tests/test_backend_failure.py b/tests/test_backend_failure.py deleted file mode 100755 index a6a56e3fdb..0000000000 --- a/tests/test_backend_failure.py +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import argparse - -from os import getcwd, chdir - -from test_fixtures import with_aktualizr, with_uptane_backend, KeyStore, with_secondary, with_path,\ - DownloadInterruptionHandler, MalformedJsonHandler, with_director, with_imagerepo, InstallManager,\ - with_install_manager, with_images, MalformedImageHandler, with_customrepo, SlowRetrievalHandler, \ - RedirectHandler, with_sysroot, with_treehub - - -logger = logging.getLogger(__file__) - -""" -Verifies whether aktualizr is updatable after director metadata download failure -with follow-up successful metadata download. - -Currently, it's tested against two types of metadata download/parsing failure: - - download interruption - metadata file download is interrupted once|three times, after that it's successful - - malformed json - aktualizr receives malformed json/metadata as a response to the first request for metadata, - a response to subsequent request is successful - -Note: Aktualizr doesn't send any installation report in manifest in case of metadata download failure -https://saeljira.it.here.com/browse/OTA-3730 -""" -@with_uptane_backend(start_generic_server=True) -@with_path(paths=['/1.root.json', '/root.json', '/targets.json']) -@with_director(handlers=[ - DownloadInterruptionHandler(number_of_failures=1), - MalformedJsonHandler(number_of_failures=1), - DownloadInterruptionHandler(number_of_failures=3), - ], start=False) -@with_aktualizr(start=False, run_mode='full') -@with_install_manager() -def test_backend_failure_sanity_director_update_after_metadata_download_failure(install_mngr, director, - aktualizr, **kwargs): - with aktualizr: - with director: - # we have to stop director before terminating aktualizr 
since the later doesn't support graceful shutdown - # (doesn't handle any signal (SIGTERM, SIGKILL, etc) what leads to receiving broken requests at director - # https://saeljira.it.here.com/browse/OTA-3744 - install_result = director.wait_for_install() - install_result = install_result and install_mngr.are_images_installed() - return install_result - - -""" -Verifies whether aktualizr is updatable after image metadata download failure -with follow-up successful metadata download. - -Currently, it's tested against two types of metadata download/parsing failure: - - download interruption - metadata file download is interrupted once|three times, after that it's successful - - malformed json - aktualizr receives malformed json/metadata as a response to the first request for metadata, - a response to subsequent request is successful - -Note: Aktualizr doesn't send any installation report in manifest in case of metadata download failure -""" -@with_uptane_backend(start_generic_server=True) -@with_path(paths=['/1.root.json', '/root.json', '/timestamp.json', '/snapshot.json', '/targets.json']) -@with_imagerepo(handlers=[ - DownloadInterruptionHandler(number_of_failures=1), - MalformedJsonHandler(number_of_failures=1), - DownloadInterruptionHandler(number_of_failures=3), - ]) -@with_director(start=False) -@with_aktualizr(run_mode='full') -@with_install_manager() -def test_backend_failure_sanity_imagerepo_update_after_metadata_download_failure(install_mngr, director, - aktualizr, **kwargs): - with aktualizr: - with director: - install_result = director.wait_for_install() - logger.info('Director install result: {}'.format(install_result)) - install_result = install_result and install_mngr.are_images_installed() - logger.info('Are images installed: {}'.format(install_result)) - return install_result - - -""" -Verifies whether aktualizr is updatable after image download failure -with follow-up successful download. 
- -Currently, it's tested against two types of image download failure: - - download interruption - file download is interrupted once, after that it's successful - - malformed image - image download is successful but it's malformed. It happens once after that it's successful -""" -@with_uptane_backend(start_generic_server=True) -@with_images(images_to_install=[(('primary-hw-ID-001', 'primary-ecu-id'), 'primary-image.img')]) -@with_imagerepo(handlers=[ - # TODO: test fails because aktualizr issues byte range request - # that are not supported by server - # https://saeljira.it.here.com/browse/OTA-3716 - #DownloadInterruptionHandler(number_of_failures=1, url='/targets/primary-image.img'), - MalformedImageHandler(number_of_failures=1, url='/targets/primary-image.img'), - ]) -@with_director(start=False) -@with_aktualizr(run_mode='full', id=('primary-hw-ID-001', 'primary-ecu-id')) -@with_install_manager() -def test_backend_failure_sanity_imagerepo_update_after_image_download_failure(install_mngr, director, - aktualizr, **kwargs): - with aktualizr: - with director: - install_result = director.wait_for_install() - install_result = install_result and install_mngr.are_images_installed() - return install_result - - -""" - Verifies whether aktualizr is updatable after malformed image is downloaded - from a custom image server with follow-up successful download. 
-""" -@with_uptane_backend(start_generic_server=True) -@with_customrepo(handlers=[ - # TODO: This test fails because the issue with image download - # from a server that doesn't support byte range requests - # DownloadInterruptionHandler(number_of_failures=1, url='/primary-image.img'), - # https://saeljira.it.here.com/browse/OTA-3716 - MalformedImageHandler(number_of_failures=1, url='/primary-image.img') - # TODO: this test fails too, although httpclient.cc sets - # CURLOPT_LOW_SPEED_TIME and CURLOPT_LOW_SPEED_TIME - # https://saeljira.it.here.com/browse/OTA-3737 - #SlowRetrievalHandler(url='/primary-image.img') - ]) -@with_imagerepo() -@with_director(start=False) -@with_aktualizr(run_mode='full') -def test_backend_failure_sanity_customrepo_update_after_image_download_failure(uptane_repo, custom_repo, director, - aktualizr, **kwargs): - update_hash = uptane_repo.add_image(aktualizr.id, 'primary-image.img', - custom_url=custom_repo.base_url + '/' + 'primary-image.img') - - with aktualizr: - with director: - install_result = director.wait_for_install() - - return install_result and update_hash == aktualizr.get_current_image_info(aktualizr.id) - - -""" - Verifies whether aktualizr is updatable after failure of object(s) download from Treehub/ostree repo - with follow-up successful download. - - Currently, it's tested against two types of object download failure: - - download interruption - object download is interrupted once, after that it's successful - - malformed object - object download is successful but it's malformed. 
It happens once after that it's successful -""" -@with_uptane_backend(start_generic_server=True) -@with_director() -@with_treehub(handlers=[ - DownloadInterruptionHandler(url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez'), - MalformedImageHandler(url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez'), - - # TODO: ostree objects download is not resilient to `Slow Retrieval Attack` - # https://saeljira.it.here.com/browse/OTA-3737 - #SlowRetrievalHandler(url='/objects/6b/1604b586fcbe052bbc0bd9e1c8040f62e085ca2e228f37df957ac939dff361.filez'), - - # TODO: Limit a number of HTTP redirects within a single request processing - # https://saeljira.it.here.com/browse/OTA-3729 - #RedirectHandler(number_of_redirects=1000, url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez') -]) -@with_sysroot() -@with_aktualizr(start=False, run_mode='once') -def test_backend_failure_sanity_treehub_update_after_image_download_failure(uptane_repo, - aktualizr, - director, - uptane_server, - sysroot, treehub): - target_rev = treehub.revision - uptane_repo.add_ostree_target(aktualizr.id, target_rev) - with aktualizr: - aktualizr.wait_for_completion() - - pending_rev = aktualizr.get_primary_pending_version() - if pending_rev != target_rev: - logger.error("Pending version {} != the target one {}".format(pending_rev, target_rev)) - return False - - sysroot.update_revision(pending_rev) - aktualizr.emulate_reboot() - - with aktualizr: - aktualizr.wait_for_completion() - - result = director.get_install_result() and (target_rev == aktualizr.get_current_primary_image_info()) - return result - - -""" - Verifies if aktualizr supports redirects - update is successful after redirect - Note: should aktualizr support unlimited number of redirects -""" -@with_uptane_backend(start_generic_server=True) -# TODO: Limit a number of HTTP redirects within a single request processing -# 
https://saeljira.it.here.com/browse/OTA-3729 -@with_customrepo(handlers=[ - RedirectHandler(number_of_redirects=10, url='/primary-image.img') - ]) -@with_imagerepo() -@with_director() -@with_aktualizr(run_mode='once', output_logs=True) -def test_backend_failure_sanity_customrepo_update_redirect(aktualizr, uptane_repo, - custom_repo, director, **kwargs): - update_hash = uptane_repo.add_image(aktualizr.id, 'primary-image.img', - custom_url=custom_repo.base_url + '/' + 'primary-image.img') - install_result = director.wait_for_install() - return install_result and update_hash == aktualizr.get_current_image_info(aktualizr.id) - -""" - Verifies whether an update fails if director metadata download fails or they are malformed - - download is interrupted three times - - malformed json is received -""" -@with_uptane_backend(start_generic_server=True) -# TODO: if root.json is malformed aktualizr ignores it and proceed with an update -# https://saeljira.it.here.com/browse/OTA-3717 -# @with_path(paths=['/root.json']) - -# TODO: if 1.root.json download from director fails then aktualizr just exits -# https://saeljira.it.here.com/browse/OTA-3728 -#@with_path(paths=['/1.root.json']) -@with_path(paths=['/targets.json']) -@with_imagerepo() -@with_director(handlers=[ - DownloadInterruptionHandler(number_of_failures=3), - MalformedJsonHandler(number_of_failures=1), - ]) -@with_aktualizr(run_mode='once') -@with_install_manager() -def test_backend_failure_sanity_director_unsuccessful_download(install_mngr, aktualizr, - director, **kwargs): - aktualizr.wait_for_completion() - return not (director.get_install_result() or install_mngr.are_images_installed()) - - -""" - Verifies whether an update fails if repo metadata download fails or they are malformed - - download is interrupted three times - - malformed json is received -""" -@with_uptane_backend(start_generic_server=True) -#@with_path(paths=['/root.json']) # TODO: if root.json is malformed aktualizr ignores it and proceed with an 
update -@with_path(paths=['/1.root.json', '/timestamp.json', '/snapshot.json', '/targets.json']) -@with_imagerepo(handlers=[ - DownloadInterruptionHandler(number_of_failures=3), - MalformedJsonHandler(number_of_failures=1), - ]) -@with_director() -@with_aktualizr(run_mode='once') -@with_install_manager() -def test_backend_failure_sanity_imagerepo_unsuccessful_download(install_mngr, aktualizr, - director, **kwargs): - aktualizr.wait_for_completion() - return not (director.get_install_result() or install_mngr.are_images_installed()) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser(description='Test backend failure') - parser.add_argument('-b', '--build-dir', help='build directory', default='build') - parser.add_argument('-s', '--src-dir', help='source directory', default='.') - parser.add_argument('-o', '--ostree', help='ostree support', default='OFF') - input_params = parser.parse_args() - - KeyStore.base_dir = input_params.src_dir - initial_cwd = getcwd() - chdir(input_params.build_dir) - - test_suite = [ - test_backend_failure_sanity_treehub_update_after_image_download_failure, - test_backend_failure_sanity_director_update_after_metadata_download_failure, - test_backend_failure_sanity_imagerepo_update_after_metadata_download_failure, - test_backend_failure_sanity_imagerepo_update_after_image_download_failure, - test_backend_failure_sanity_customrepo_update_after_image_download_failure, - test_backend_failure_sanity_director_unsuccessful_download, - test_backend_failure_sanity_imagerepo_unsuccessful_download, - test_backend_failure_sanity_customrepo_update_redirect, - ] - - if input_params.ostree == 'ON': - test_suite.append(test_backend_failure_sanity_treehub_update_after_image_download_failure) - - test_suite_run_result = True - for test in test_suite: - logger.info('>>> Running {}...'.format(test.__name__)) - test_run_result = test() - logger.info('>>> {}: {}'.format('OK' if test_run_result else 
'Failed', test.__name__)) - test_suite_run_result = test_suite_run_result and test_run_result - - chdir(initial_cwd) - exit(0 if test_suite_run_result else 1) diff --git a/tests/test_customrepo_failure.py b/tests/test_customrepo_failure.py new file mode 100755 index 0000000000..e59e1ae516 --- /dev/null +++ b/tests/test_customrepo_failure.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 + +import logging +import argparse + +from os import getcwd, chdir + +from test_fixtures import with_aktualizr, with_uptane_backend, KeyStore, with_secondary, with_path,\ + DownloadInterruptionHandler, MalformedJsonHandler, with_director, with_imagerepo, InstallManager,\ + with_install_manager, with_images, MalformedImageHandler, with_customrepo, SlowRetrievalHandler, \ + RedirectHandler, with_sysroot, with_treehub, TestRunner + + +logger = logging.getLogger(__file__) + + +""" + Verifies whether aktualizr is updatable after malformed image is downloaded + from a custom image server with follow-up successful download. 
+""" +@with_uptane_backend(start_generic_server=True) +@with_customrepo(handlers=[ + DownloadInterruptionHandler(number_of_failures=1, url='/primary-image.img'), + MalformedImageHandler(number_of_failures=1, url='/primary-image.img') + # TODO: this test fails too, although httpclient.cc sets + # CURLOPT_LOW_SPEED_TIME and CURLOPT_LOW_SPEED_TIME + # https://saeljira.it.here.com/browse/OTA-3737 + #SlowRetrievalHandler(url='/primary-image.img') + ]) +@with_imagerepo() +@with_director(start=False) +@with_aktualizr(start=False, run_mode='full') +def test_customrepo_update_after_image_download_failure(uptane_repo, custom_repo, director, + aktualizr, **kwargs): + update_hash = uptane_repo.add_image(aktualizr.id, 'primary-image.img', + custom_url=custom_repo.base_url + '/' + 'primary-image.img') + + with aktualizr: + with director: + install_result = director.wait_for_install() + + return install_result and update_hash == aktualizr.get_current_image_info(aktualizr.id) + + +""" + Verifies if aktualizr supports redirects - update is successful after redirect + Note: should aktualizr support unlimited number of redirects +""" +@with_uptane_backend(start_generic_server=True) +@with_customrepo(handlers=[ + RedirectHandler(number_of_redirects=10, url='/primary-image.img') + ]) +@with_imagerepo() +@with_director() +@with_aktualizr(run_mode='once', output_logs=True) +def test_customrepo_update_redirect(aktualizr, uptane_repo, + custom_repo, director, **kwargs): + update_hash = uptane_repo.add_image(aktualizr.id, 'primary-image.img', + custom_url=custom_repo.base_url + '/' + 'primary-image.img') + install_result = director.wait_for_install() + return install_result and update_hash == aktualizr.get_current_image_info(aktualizr.id) + +""" + Verifies if aktualizr rejects redirects over 10 times - update fails after redirect +""" +@with_uptane_backend(start_generic_server=True) +@with_customrepo(handlers=[ + RedirectHandler(number_of_redirects=(11 * 3 + 1), url='/primary-image.img') + 
]) +@with_imagerepo() +@with_director() +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_customrepo_unsuccessful_update_redirect(aktualizr, uptane_repo, + custom_repo, director, **kwargs): + update_hash = uptane_repo.add_image(aktualizr.id, 'primary-image.img', + custom_url=custom_repo.base_url + '/' + 'primary-image.img') + with aktualizr: + aktualizr.wait_for_completion() + + return not director.get_install_result() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_customrepo_update_after_image_download_failure, + test_customrepo_update_redirect, + test_customrepo_unsuccessful_update_redirect, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_data/cred.p12 b/tests/test_data/cred.p12 index 2ec0b09e06..e6b59cae01 100644 Binary files a/tests/test_data/cred.p12 and b/tests/test_data/cred.p12 differ diff --git a/tests/test_data/cred.zip b/tests/test_data/cred.zip index d41d8a2094..807364fb0e 100644 Binary files a/tests/test_data/cred.zip and b/tests/test_data/cred.zip differ diff --git a/tests/test_data/credentials.zip b/tests/test_data/credentials.zip index 4b54fb4667..94755bf09a 100644 Binary files a/tests/test_data/credentials.zip and b/tests/test_data/credentials.zip differ diff --git a/tests/test_director_failure.py b/tests/test_director_failure.py new file mode 100755 index 0000000000..9a6816210b --- /dev/null +++ b/tests/test_director_failure.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 + +import 
logging +import argparse + +from os import getcwd, chdir +from test_fixtures import KeyStore, with_uptane_backend, with_path, with_director, with_aktualizr,\ + with_install_manager, with_imagerepo, TestRunner, \ + DownloadInterruptionHandler, MalformedJsonHandler, DownloadInterruptionHandler + +logger = logging.getLogger(__file__) + + +""" +Verifies whether aktualizr is updatable after director metadata download failure +with follow-up successful metadata download. + +Currently, it's tested against two types of metadata download/parsing failure: + - download interruption - metadata file download is interrupted once|three times, after that it's successful + - malformed json - aktualizr receives malformed json/metadata as a response to the first request for metadata, + a response to subsequent request is successful + +Note: Aktualizr doesn't send any installation report in manifest in case of metadata download failure +https://saeljira.it.here.com/browse/OTA-3730 +""" +@with_uptane_backend(start_generic_server=True) +@with_path(paths=['/1.root.json', '/targets.json']) +@with_director(handlers=[ + DownloadInterruptionHandler(number_of_failures=1), + MalformedJsonHandler(number_of_failures=1), + DownloadInterruptionHandler(number_of_failures=3), + ], start=False) +@with_aktualizr(start=False, run_mode='full') +@with_install_manager() +def test_director_update_after_metadata_download_failure(install_mngr, director, + aktualizr, **kwargs): + with director: + with aktualizr: + install_result = director.wait_for_install() + install_result = install_result and install_mngr.are_images_installed() + return install_result + + +""" + Verifies whether an update fails if director metadata download fails or they are malformed + - download is interrupted three times + - malformed json is received +""" +@with_uptane_backend(start_generic_server=True) +@with_path(paths=['/1.root.json', '/targets.json']) +@with_imagerepo() +@with_director(handlers=[ + 
DownloadInterruptionHandler(number_of_failures=3), + MalformedJsonHandler(number_of_failures=1), + ]) +@with_aktualizr(run_mode='once') +@with_install_manager() +def test_director_unsuccessful_download(install_mngr, aktualizr, + director, **kwargs): + aktualizr.wait_for_completion() + return not (director.get_install_result() or install_mngr.are_images_installed()) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_director_update_after_metadata_download_failure, + test_director_unsuccessful_download + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py index 7b628b97cb..1fed550da2 100644 --- a/tests/test_fixtures.py +++ b/tests/test_fixtures.py @@ -3,67 +3,83 @@ import json import tempfile import threading -import time import os import shutil import signal import socket +import time -from os import devnull -from os import path +from io import BytesIO +from os import path, urandom from uuid import uuid4 -from os import urandom from functools import wraps +from multiprocessing import pool, cpu_count from http.server import SimpleHTTPRequestHandler, HTTPServer - +from threading import Thread from fake_http_server.fake_test_server import FakeTestServerBackground from sota_tools.treehub_server import create_repo +from shutil import copyfileobj logger = logging.getLogger(__name__) +class CopyThread(Thread): + def __init__(self, src, dest): + super().__init__() + self._src = src + self._dst = dest + 
+ def run(self): + copyfileobj(self._src, self._dst, 1024) + + class Aktualizr: def __init__(self, aktualizr_primary_exe, aktualizr_info_exe, id, - uptane_server, ca, pkey, cert, wait_port=9040, wait_timeout=60, log_level=1, - secondary=None, output_logs=True, + uptane_server, wait_port=9040, wait_timeout=60, log_level=1, + primary_port=None, secondaries=None, secondary_wait_sec=600, output_logs=True, run_mode='once', director=None, image_repo=None, sysroot=None, treehub=None, ostree_mock_path=None, **kwargs): self.id = id - self._aktualizr_primary_exe = aktualizr_primary_exe self._aktualizr_info_exe = aktualizr_info_exe self._storage_dir = tempfile.TemporaryDirectory() self._log_level = log_level self._sentinel_file = 'need_reboot' self.reboot_sentinel_file = os.path.join(self._storage_dir.name, self._sentinel_file) + self._import_dir = os.path.join(self._storage_dir.name, 'import') + KeyStore.copy_keys(self._import_dir) with open(path.join(self._storage_dir.name, 'secondary_config.json'), 'w+') as secondary_config_file: secondary_cfg = json.loads(Aktualizr.SECONDARY_CONFIG_TEMPLATE. 
- format(port=secondary.primary_port if secondary else wait_port, + format(port=primary_port if primary_port else wait_port, timeout=wait_timeout)) json.dump(secondary_cfg, secondary_config_file) self._secondary_config_file = secondary_config_file.name + self._secondary_wait_sec = secondary_wait_sec with open(path.join(self._storage_dir.name, 'config.toml'), 'w+') as config_file: config_file.write(Aktualizr.CONFIG_TEMPLATE.format(server_url=uptane_server.base_url, - ca_path=ca, pkey_path=pkey, cert_path=cert, + import_path=self._import_dir, serial=id[1], hw_ID=id[0], - storage_dir=self._storage_dir, + storage_dir=self._storage_dir.name, db_path=path.join(self._storage_dir.name, 'sql.db'), log_level=self._log_level, secondary_cfg_file=self._secondary_config_file, + secondary_wait_sec=self._secondary_wait_sec, director=director.base_url if director else '', image_repo=image_repo.base_url if image_repo else '', - pacmam_type='ostree' if treehub and sysroot else 'fake', + pacman_type='ostree' if treehub and sysroot else 'none', ostree_sysroot=sysroot.path if sysroot else '', treehub_server=treehub.base_url if treehub else '', sentinel_dir=self._storage_dir.name, sentinel_name=self._sentinel_file)) self._config_file = config_file.name - self.add_secondary(secondary) if secondary else None + if secondaries is not None: + for s in secondaries: + self.add_secondary(s) self._output_logs = output_logs self._run_mode = run_mode self._run_env = {} @@ -76,9 +92,10 @@ def __init__(self, aktualizr_primary_exe, aktualizr_info_exe, id, server = "{server_url}" [import] - tls_cacert_path = "{ca_path}" - tls_pkey_path = "{pkey_path}" - tls_clientcert_path = "{cert_path}" + base_path = "{import_path}" + tls_cacert_path = "ca.pem" + tls_pkey_path = "pkey.pem" + tls_clientcert_path = "client.pem" [provision] primary_ecu_serial = "{serial}" @@ -86,11 +103,11 @@ def __init__(self, aktualizr_primary_exe, aktualizr_info_exe, id, [storage] path = "{storage_dir}" - type = "sqlite" sqldb_path = 
"{db_path}" [pacman] - type = "{pacmam_type}" + type = "{pacman_type}" + images_path = "{storage_dir}/images" sysroot = "{ostree_sysroot}" ostree_server = "{treehub_server}" os = "dummy-os" @@ -98,6 +115,7 @@ def __init__(self, aktualizr_primary_exe, aktualizr_info_exe, id, [uptane] polling_sec = 0 secondary_config_file = "{secondary_cfg_file}" + secondary_preinstall_wait_sec = {secondary_wait_sec} director_server = "{director}" repo_server = "{image_repo}" @@ -121,12 +139,25 @@ def __init__(self, aktualizr_primary_exe, aktualizr_info_exe, id, }} ''' + def set_mode(self, mode): + self._run_mode = mode + def add_secondary(self, secondary): with open(self._secondary_config_file, "r+") as config_file: sec_cfg = json.load(config_file) - sec_cfg["IP"]["secondaries"].append({"addr": "127.0.0.1:{}".format(secondary.port)}) + sec_cfg["IP"]["secondaries"].append({"addr": "127.0.0.1:{}".format(secondary.port), "verification_type": "{}".format(secondary.verification_type)}) + config_file.seek(0) + json.dump(sec_cfg, config_file) + logger.debug("IP Secondary {} has been added with port {} and verification type {}".format(secondary.id, secondary.port, secondary.verification_type)) + + def remove_secondary(self, secondary): + with open(self._secondary_config_file, "r+") as config_file: + sec_cfg = json.load(config_file) + sec_cfg["IP"]["secondaries"].remove({"addr": "127.0.0.1:{}".format(secondary.port), "verification_type": "{}".format(secondary.verification_type)}) config_file.seek(0) json.dump(sec_cfg, config_file) + config_file.truncate() + logger.debug("IP Secondary {} has been removed with port {} and verification type {}".format(secondary.id, secondary.port, secondary.verification_type)) def update_wait_timeout(self, timeout): with open(self._secondary_config_file, "r+") as config_file: @@ -139,27 +170,32 @@ def run(self, run_mode): subprocess.run([self._aktualizr_primary_exe, '-c', self._config_file, '--run-mode', run_mode], check=True, env=self._run_env) - def 
get_info(self, retry=10): + # another ugly stuff that could be replaced with something more reliable if Aktualizr had exposed API + # to check status or aktualizr-info had output status/info in a structured way (e.g. json) + def get_info(self, retry=30): info_exe_res = None for ii in range(0, retry): info_exe_res = subprocess.run([self._aktualizr_info_exe, '-c', self._config_file], timeout=60, stdout=subprocess.PIPE, env=self._run_env) if info_exe_res.returncode == 0 and \ - str(info_exe_res.stdout).find('no details about installed nor pending images') != -1: + str(info_exe_res.stdout).find('Provisioned on server: yes') != -1 and \ + str(info_exe_res.stdout).find('Current Primary ECU running version:') != -1: break if info_exe_res and info_exe_res.returncode == 0: return str(info_exe_res.stdout) else: + logger.error('Failed to get aktualizr status info, stdout: {}, stderr: {}'. + format(str(info_exe_res.stdout), str(info_exe_res.stderr))) return None # ugly stuff that could be removed if Aktualizr had exposed API to check status - # or aktializr-info had output status/info in a structured way (e.g. json) + # or aktualizr-info had output status/info in a structured way (e.g. 
json) def is_ecu_registered(self, ecu_id): device_status = self.get_info() if not ((device_status.find(ecu_id[0]) != -1) and (device_status.find(ecu_id[1]) != -1)): return False - not_registered_field = "Removed or not registered ecus:" + not_registered_field = "Removed or unregistered ECUs (deprecated):" not_reg_start = device_status.find(not_registered_field) return not_reg_start == -1 or (device_status.find(ecu_id[1], not_reg_start) == -1) @@ -169,13 +205,15 @@ def get_current_image_info(self, ecu_id): else: return self._get_current_image_info(ecu_id) - # applicable only to secondary ECUs due to inconsistency in presenting information - # about primary and secondary ECUs - # ugly stuff that could be removed if Aktualizr had exposed API to check status - # or aktializr-info had output status/info in a structured way (e.g. json) - def _get_current_image_info(self, ecu_id): - secondary_image_hash_field = 'installed image hash: ' - secondary_image_filename_field = 'installed image filename: ' + def get_current_pending_image_info(self, ecu_id): + return self._get_current_image_info(ecu_id, secondary_image_hash_field='pending image hash: ') + + # applicable only to Secondary ECUs due to inconsistency in presenting information + # about Primary and Secondary ECUs + # ugly stuff that could be removed if aktualizr had exposed API to check status + # or aktualizr-info had output status/info in a structured way (e.g. json) + def _get_current_image_info(self, ecu_id, secondary_image_hash_field='installed image hash: '): + #secondary_image_filename_field = 'installed image filename: ' aktualizr_status = self.get_info() ecu_serial = ecu_id[1] ecu_info_position = aktualizr_status.find(ecu_serial) @@ -193,18 +231,22 @@ def _get_current_image_info(self, ecu_id): return hash_val # ugly stuff that could be removed if Aktualizr had exposed API to check status - # or aktializr-info had output status/info in a structured way (e.g. 
json) + # or aktualizr-info had output status/info in a structured way (e.g. json) def get_current_primary_image_info(self): - primary_hash_field = 'Current primary ecu running version: ' + primary_hash_field = 'Current Primary ECU running version: ' aktualizr_status = self.get_info() - start = aktualizr_status.find(primary_hash_field) - end = aktualizr_status.find('\\n', start) - return aktualizr_status[start + len(primary_hash_field):end] + if aktualizr_status: + start = aktualizr_status.find(primary_hash_field) + end = aktualizr_status.find('\\n', start) + return aktualizr_status[start + len(primary_hash_field):end] + else: + logger.error("Failed to get aktualizr info/status") + return "" # ugly stuff that could be removed if Aktualizr had exposed API to check status - # or aktializr-info had output status/info in a structured way (e.g. json) + # or aktualizr-info had output status/info in a structured way (e.g. json) def get_primary_pending_version(self): - primary_hash_field = 'Pending primary ecu version: ' + primary_hash_field = 'Pending Primary ECU version: ' aktualizr_status = self.get_info() start = aktualizr_status.find(primary_hash_field) end = aktualizr_status.find('\\n', start) @@ -216,19 +258,28 @@ def __enter__(self): stderr=None if self._output_logs else subprocess.STDOUT, close_fds=True, env=self._run_env) + if not self._output_logs: + self._stdout = BytesIO() + self._stdout_thread = CopyThread(self._process.stdout, self._stdout) + self._stdout_thread.start() logger.debug("Aktualizr has been started") return self def __exit__(self, exc_type, exc_val, exc_tb): self._process.terminate() self._process.wait(timeout=60) + if not self._output_logs: + self._stdout_thread.join(10) logger.debug("Aktualizr has been stopped") def terminate(self, sig=signal.SIGTERM): self._process.send_signal(sig) def output(self): - return self._process.stdout.read().decode(errors='replace') + if self._output_logs: + # stdout has gone to the console... 
+ raise Exception("Can't get output from Aktualizr object if output_logs is set") + return self._stdout.getbuffer().tobytes().decode(errors='replace') def wait_for_completion(self, timeout=120): self._process.wait(timeout) @@ -249,6 +300,13 @@ def emulate_reboot(self): class KeyStore: base_dir = "./" + @staticmethod + def copy_keys(dest_path): + os.mkdir(dest_path) + shutil.copy(KeyStore.ca(), dest_path) + shutil.copy(KeyStore.pkey(), dest_path) + shutil.copy(KeyStore.cert(), dest_path) + @staticmethod def ca(): return path.join(KeyStore.base_dir, 'tests/test_data/prov_testupdate/ca.pem') @@ -264,26 +322,53 @@ def cert(): class IPSecondary: - def __init__(self, aktualizr_secondary_exe, id, port=9050, primary_port=9040): + def __init__(self, id, aktualizr_secondary_exe='src/aktualizr_secondary/aktualizr-secondary', port=None, primary_port=None, + sysroot=None, treehub=None, output_logs=True, force_reboot=False, + ostree_mock_path=None, verification_type="Full", **kwargs): self.id = id - self.port = port self._aktualizr_secondary_exe = aktualizr_secondary_exe - self._storage_dir = tempfile.TemporaryDirectory() - self.port = self.get_free_port() - self.primary_port = self.get_free_port() + self.storage_dir = tempfile.TemporaryDirectory() + self.port = self.get_free_port() if port is None else port + self.primary_port = self.get_free_port() if primary_port is None else primary_port + self._sentinel_file = 'need_reboot' + self._output_logs = output_logs + self.reboot_sentinel_file = os.path.join(self.storage_dir.name, self._sentinel_file) + self.verification_type = verification_type - with open(path.join(self._storage_dir.name, 'config.toml'), 'w+') as config_file: + if force_reboot: + reboot_command = "rm {}".format(self.reboot_sentinel_file) + else: + reboot_command = "" + + with open(path.join(self.storage_dir.name, 'config.toml'), 'w+') as config_file: config_file.write(IPSecondary.CONFIG_TEMPLATE.format(serial=id[1], hw_ID=id[0], + force_reboot=1 if force_reboot 
else 0, + reboot_command=reboot_command, port=self.port, primary_port=self.primary_port, - storage_dir=self._storage_dir, - db_path=path.join(self._storage_dir.name, 'db.sql'))) + storage_dir=self.storage_dir.name, + db_path=path.join(self.storage_dir.name, 'db.sql'), + pacman_type='ostree' if treehub and sysroot else 'none', + ostree_sysroot=sysroot.path if sysroot else '', + treehub_server=treehub.base_url if treehub else '', + sentinel_dir=self.storage_dir.name, + sentinel_name=self._sentinel_file, + verification_type=self.verification_type + )) self._config_file = config_file.name + self._run_env = {} + if sysroot and ostree_mock_path: + self._run_env['LD_PRELOAD'] = os.path.abspath(ostree_mock_path) + self._run_env['OSTREE_DEPLOYMENT_VERSION_FILE'] = sysroot.version_file + + CONFIG_TEMPLATE = ''' [uptane] ecu_serial = "{serial}" ecu_hardware_id = "{hw_ID}" + force_install_completion = {force_reboot} + verification_type = "{verification_type}" [network] port = {port} @@ -291,13 +376,19 @@ def __init__(self, aktualizr_secondary_exe, id, port=9050, primary_port=9040): primary_port = {primary_port} [storage] - type = "sqlite" path = "{storage_dir}" sqldb_path = "{db_path}" - [pacman] - type = "fake" + type = "{pacman_type}" + sysroot = "{ostree_sysroot}" + ostree_server = "{treehub_server}" + os = "dummy-os" + + [bootloader] + reboot_sentinel_dir = "{sentinel_dir}" + reboot_sentinel_name = "{sentinel_name}" + reboot_command = "{reboot_command}" ''' def is_running(self): @@ -313,14 +404,35 @@ def get_free_port(): def __enter__(self): self._process = subprocess.Popen([self._aktualizr_secondary_exe, '-c', self._config_file], - stdout=open(devnull, 'w'), close_fds=True) - logger.debug("IP Secondary {} has been started: {}".format(self.id, self.port)) + stdout=None if self._output_logs else subprocess.PIPE, + stderr=None if self._output_logs else subprocess.STDOUT, + close_fds=True, + env=self._run_env) + if not self._output_logs: + self._stdout = BytesIO() + 
self._stdout_thread = CopyThread(self._process.stdout, self._stdout) + self._stdout_thread.start() + logger.debug("IP Secondary {} has been started with port {} and verification type {}".format(self.id, self.port, self.verification_type)) return self def __exit__(self, exc_type, exc_val, exc_tb): self._process.terminate() self._process.wait(timeout=60) - logger.debug("IP Secondary {} has been stopped".format(self.id)) + if not self._output_logs: + self._stdout_thread.join(10) + logger.debug("IP Secondary {} has been stopped with port {} and verification type {}".format(self.id, self.port, self.verification_type)) + + def output(self): + if self._output_logs: + # stdout has gone to the console... + raise Exception("Can't get output from IP Secondary object if output_logs is set") + return self._stdout.getbuffer().tobytes().decode(errors='replace') + + def wait_for_completion(self, timeout=120): + self._process.wait(timeout) + + def emulate_reboot(self): + os.remove(self.reboot_sentinel_file) class UptaneRepo(HTTPServer): @@ -360,6 +472,10 @@ def default_handler(self): self.end_headers() def default_get(self): + if not os.path.exists(self.file_path): + self.send_response(404) + self.end_headers() + return self.send_response(200) self.end_headers() with open(self.file_path, 'rb') as source: @@ -403,6 +519,7 @@ def __init__(self, uptane_repo_root, ifc, port, client_handler_map={}): super(DirectorRepo, self).__init__(os.path.join(uptane_repo_root, self.director_subdir), ifc=ifc, port=port, client_handler_map=client_handler_map) + self._manifest = None self._last_install_res = False self._last_install_res_lock = threading.RLock() self._installed_condition = threading.Condition() @@ -422,13 +539,14 @@ def handle_manifest(self): if json_data: install_report = json_data['signed'].get('installation_report', "") if install_report: - self.server.set_install_event(install_report['report']['result']['success']) + self.server.set_install_event(json_data) handler_map = {'PUT': 
{'/manifest': handle_manifest}} - def set_install_event(self, result): + def set_install_event(self, manifest): with self._installed_condition: - self._last_install_res = result + self._manifest = manifest + self._last_install_res = manifest['signed']['installation_report']['report']['result']['success'] self._installed_condition.notifyAll() def wait_for_install(self, timeout=180): @@ -440,6 +558,16 @@ def get_install_result(self): with self._installed_condition: return self._last_install_res + def get_manifest(self): + with self._installed_condition: + return self._manifest + + def get_ecu_manifest(self, ecu_serial): + return self.get_manifest()['signed']['ecu_version_manifests'][ecu_serial] + + def get_ecu_manifest_filepath(self, ecu_serial): + return self.get_ecu_manifest(ecu_serial)['signed']['installed_image']['filepath'] + class ImageRepo(UptaneRepo): """ @@ -467,7 +595,7 @@ def __init__(self, root, ifc, port, client_handler_map={}): class Treehub(UptaneRepo): """ - This server serves requests from an ostree client, i.e. emulates/mocks the treehub server + This server serves requests from an OSTree client, i.e. 
emulates/mocks the treehub server """ def __init__(self, ifc, port, client_handler_map={}): self.root = tempfile.mkdtemp() @@ -519,23 +647,32 @@ def map(self, url=''): class MalformedImageHandler: - def __init__(self, number_of_failures=1, url=''): + dummy_filez = (b'\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06' + + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x81\xa4\x00\x00\x00\x00' + + b'\x00\x19\x33\x34\x32\x36\x31\xe5\x02\x00') + + def __init__(self, number_of_failures=1, url='', fake_filez=False): self._number_of_failures = number_of_failures self._failure_counter = 0 self._url = url + self._fake_filez = fake_filez def __call__(self, request_handler): - if self._failure_counter < self._number_of_failures: + if self._number_of_failures == -1 or self._failure_counter < self._number_of_failures: request_handler.send_response(200) request_handler.end_headers() - request_handler.wfile.write(b'malformed image') + if self._fake_filez: + request_handler.wfile.write(self.dummy_filez) + else: + request_handler.wfile.write(b'malformed image') self._failure_counter += 1 else: request_handler.default_get() def map(self, url): - return {'GET': {url if url else self._url: MalformedImageHandler(self._number_of_failures)}} + return {'GET': {url if url else self._url: MalformedImageHandler(self._number_of_failures, + fake_filez=self._fake_filez)}} class SlowRetrievalHandler: @@ -643,8 +780,7 @@ def target_dir(self): def target_file(self): return path.join(self.image_dir, 'targets.json') - def add_image(self, id, image_filename, target_name=None, image_size=1024, custom_url=''): - + def add_image(self, id, image_filename, target_name=None, image_size=1024, custom_url='', custom_version=''): targetname = target_name if target_name else image_filename with open(path.join(self.image_dir, image_filename), 'wb') as image_file: @@ -657,6 +793,10 @@ def add_image(self, id, image_filename, target_name=None, image_size=1024, custo image_creation_cmdline.append('--url') 
image_creation_cmdline.append(custom_url) + if custom_version: + image_creation_cmdline.append('--customversion') + image_creation_cmdline.append(custom_version) + subprocess.run(image_creation_cmdline, cwd=self.image_dir, check=True) # update the director metadata @@ -673,27 +813,46 @@ def add_image(self, id, image_filename, target_name=None, image_size=1024, custo return target_hash - def add_ostree_target(self, id, rev_hash): + def add_ostree_target(self, id, rev_hash, target_name=None, expires_within_sec=(60 * 5), target_uri=None): + # emulate the backend behavior on defining a target name for OSTREE target format + target_name = rev_hash if target_name is None else "{}-{}".format(target_name, rev_hash) image_creation_cmdline = [self._repo_manager_exe, '--command', 'image', '--path', self.root_dir, - '--targetname', rev_hash, + '--targetname', target_name, '--targetsha256', rev_hash, '--targetlength', '0', - '--targetformat', 'OSTREE', '--hwid', id[0]] + if target_uri is not None: + image_creation_cmdline += ["--url", target_uri] subprocess.run(image_creation_cmdline, check=True) + expiration_time = time.time() + expires_within_sec + expiration_time_str = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(expiration_time)) + subprocess.run([self._repo_manager_exe, '--command', 'addtarget', '--path', self.root_dir, - '--targetname', rev_hash, + '--targetname', target_name, '--hwid', id[0], - '--serial', id[1]], + '--serial', id[1], + '--expires', expiration_time_str], check=True) subprocess.run([self._repo_manager_exe, '--path', self.root_dir, '--command', 'signtargets'], check=True) + return target_name + + def clear_targets(self): + subprocess.run([self._repo_manager_exe, '--path', self.root_dir, '--command', 'emptytargets'], check=True) + + def rotate_root(self, is_director): + if is_director: + repo_type = 'director' + else: + repo_type = 'image' + subprocess.run([self._repo_manager_exe, '--path', self.root_dir, '--command', 'rotate', '--repotype', repo_type, 
'--keytype', 'ED25519'], check=True) + def __enter__(self): self._generate_repo() return self @@ -702,20 +861,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): shutil.rmtree(self.root_dir, ignore_errors=True) def _generate_repo(self): - subprocess.run([self._repo_manager_exe, '--path', self.root_dir, '--command', 'generate'], check=True) + subprocess.run([self._repo_manager_exe, '--path', self.root_dir, + '--command', 'generate', '--keytype', 'ED25519'], check=True) def with_aktualizr(start=True, output_logs=False, id=('primary-hw-ID-001', str(uuid4())), wait_timeout=60, - log_level=1, aktualizr_primary_exe='src/aktualizr_primary/aktualizr', + secondary_wait_sec=600, log_level=1, aktualizr_primary_exe='src/aktualizr_primary/aktualizr', aktualizr_info_exe='src/aktualizr_info/aktualizr-info', run_mode='once'): def decorator(test): @wraps(test) def wrapper(*args, ostree_mock_path=None, **kwargs): aktualizr = Aktualizr(aktualizr_primary_exe=aktualizr_primary_exe, - aktualizr_info_exe=aktualizr_info_exe, - id=id, ca=KeyStore.ca(), pkey=KeyStore.pkey(), cert=KeyStore.cert(), - wait_timeout=wait_timeout, log_level=log_level, output_logs=output_logs, + aktualizr_info_exe=aktualizr_info_exe, id=id, + wait_timeout=wait_timeout, + secondary_wait_sec=secondary_wait_sec, + log_level=log_level, output_logs=output_logs, run_mode=run_mode, ostree_mock_path=ostree_mock_path, **kwargs) if start: with aktualizr: @@ -795,17 +956,27 @@ def func(handler_map={}): return decorator -def with_secondary(start=True, id=('secondary-hw-ID-001', str(uuid4())), - aktualizr_secondary_exe='src/aktualizr_secondary/aktualizr-secondary'): +def with_secondary(start=True, output_logs=False, id=('secondary-hw-ID-001', None), + force_reboot=False, arg_name='secondary', + aktualizr_secondary_exe='src/aktualizr_secondary/aktualizr-secondary', + verification_type="Full"): def decorator(test): @wraps(test) def wrapper(*args, **kwargs): - secondary = 
IPSecondary(aktualizr_secondary_exe=aktualizr_secondary_exe, id=id) + id1 = id + if id1[1] is None: + id1 = (id1[0], str(uuid4())) + secondary = IPSecondary(aktualizr_secondary_exe=aktualizr_secondary_exe, output_logs=output_logs, + id=id1, force_reboot=force_reboot, verification_type=verification_type, **kwargs) + sl = kwargs.get("secondaries", []) + [secondary] + kwargs.update({arg_name: secondary, "secondaries": sl}) + if "primary_port" not in kwargs: + kwargs["primary_port"] = secondary.primary_port if start: with secondary: - result = test(*args, **kwargs, secondary=secondary) + result = test(*args, **kwargs) else: - result = test(*args, **kwargs, secondary=secondary) + result = test(*args, **kwargs) return result return wrapper return decorator @@ -960,3 +1131,52 @@ def func(handler_map={}): return wrapper return decorator + +class NonDaemonPool(pool.Pool): + def Process(self, *args, **kwds): + proc = super(NonDaemonPool, self).Process(*args, **kwds) + + class NonDaemonProcess(proc.__class__): + """Monkey-patch process to ensure it is never daemonized""" + + @property + def daemon(self): + return False + + @daemon.setter + def daemon(self, val): + pass + + proc.__class__ = NonDaemonProcess + + return proc + + +class TestRunner: + def __init__(self, tests): + self._tests = tests + self._test_runner_pool = NonDaemonPool(min(len(self._tests), cpu_count())) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + # This must be called, see https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.Pool + self._test_runner_pool.__exit__(exc_type, exc_value, traceback) + + @staticmethod + def test_runner(test): + logger.info('>>> Running {}...'.format(test.__name__)) + test_run_result = test() + if test_run_result: + logger.info('\033[32m>>> OK: {}\033[0m\n'.format(test.__name__)) + else: + logger.info('\033[31m>>> FAILED: {}\033[0m\n'.format(test.__name__)) + return test_run_result + + def run(self): + 
results = self._test_runner_pool.map(TestRunner.test_runner, self._tests) + total_result = True + for result in results: + total_result = total_result and result + return total_result diff --git a/tests/test_imagerepo_failure.py b/tests/test_imagerepo_failure.py new file mode 100755 index 0000000000..0be37bd061 --- /dev/null +++ b/tests/test_imagerepo_failure.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 + +import logging +import argparse + +from os import getcwd, chdir +from test_fixtures import KeyStore, with_uptane_backend, with_path, with_director, with_aktualizr,\ + with_install_manager, with_imagerepo, with_images, MalformedImageHandler, \ + DownloadInterruptionHandler, MalformedJsonHandler, DownloadInterruptionHandler, TestRunner + + +logger = logging.getLogger(__file__) + + +""" +Verifies whether aktualizr is updatable after image metadata download failure +with follow-up successful metadata download. + +Currently, it's tested against two types of metadata download/parsing failure: + - download interruption - metadata file download is interrupted once|three times, after that it's successful + - malformed json - aktualizr receives malformed json/metadata as a response to the first request for metadata, + a response to subsequent request is successful + +Note: Aktualizr doesn't send any installation report in manifest in case of metadata download failure +""" +@with_uptane_backend(start_generic_server=True) +@with_path(paths=['/1.root.json', '/timestamp.json', '/snapshot.json', '/targets.json']) +@with_imagerepo(handlers=[ + DownloadInterruptionHandler(number_of_failures=1), + MalformedJsonHandler(number_of_failures=1), + DownloadInterruptionHandler(number_of_failures=3), + ]) +@with_director(start=False) +@with_aktualizr(start=False, run_mode='full') +@with_install_manager() +def test_imagerepo_update_after_metadata_download_failure(install_mngr, director, + aktualizr, **kwargs): + with aktualizr: + with director: + install_result = 
director.wait_for_install() + logger.info('Director install result: {}'.format(install_result)) + install_result = install_result and install_mngr.are_images_installed() + logger.info('Are images installed: {}'.format(install_result)) + return install_result + + +""" +Verifies aktualizr error logs in the occasion of an targets.json file that does not match +the hashes referenced in snapshot.json. +After the first failure, the correct file is sent, and the installation succeeds. +""" + +@with_uptane_backend(start_generic_server=True) +@with_path(paths=['/targets.json']) +@with_imagerepo(handlers=[ + MalformedJsonHandler(number_of_failures=1), + ]) +@with_director(start=False) +@with_aktualizr(start=False, run_mode='full', log_level=2) +@with_install_manager() +def test_incorrect_targets_logs(install_mngr, director, + aktualizr, **kwargs): + with aktualizr, director: + install_result = director.wait_for_install() + logger.info('Director install result: {}'.format(install_result)) + install_result = install_result and install_mngr.are_images_installed() + logger.info('Are images installed: {}'.format(install_result)) + output = aktualizr.output() + if not "Signature verification for Image repo Targets metadata failed: Hash metadata mismatch" in output: + return False + if not "Failed to update Image repo metadata: Hash metadata mismatch" in output: + return False + logger.info(output) + return install_result + +""" +Verifies whether aktualizr is updatable after image download failure +with follow-up successful download. + +Currently, it's tested against two types of image download failure: + - download interruption - file download is interrupted once, after that it's successful + - malformed image - image download is successful but it's malformed. 
It happens once after that it's successful +""" +@with_uptane_backend(start_generic_server=True) +@with_images(images_to_install=[(('primary-hw-ID-001', 'primary-ecu-id'), 'primary-image.img')]) +@with_imagerepo(handlers=[ + DownloadInterruptionHandler(number_of_failures=1, url='/targets/primary-image.img'), + MalformedImageHandler(number_of_failures=1, url='/targets/primary-image.img'), + ]) +@with_director(start=False) +@with_aktualizr(start=False, run_mode='full', id=('primary-hw-ID-001', 'primary-ecu-id')) +@with_install_manager() +def test_imagerepo_update_after_image_download_failure(install_mngr, director, + aktualizr, **kwargs): + with aktualizr: + with director: + install_result = director.wait_for_install() + install_result = install_result and install_mngr.are_images_installed() + return install_result + + +""" + Verifies whether an update fails if repo metadata download fails or they are malformed + - download is interrupted three times + - malformed json is received +""" +@with_uptane_backend(start_generic_server=True) +@with_path(paths=['/1.root.json', '/timestamp.json', '/snapshot.json', '/targets.json']) +@with_imagerepo(handlers=[ + DownloadInterruptionHandler(number_of_failures=3), + MalformedJsonHandler(number_of_failures=1), + ]) +@with_director() +@with_aktualizr(run_mode='once') +@with_install_manager() +def test_imagerepo_unsuccessful_download(install_mngr, aktualizr, + director, **kwargs): + aktualizr.wait_for_completion() + return not (director.get_install_result() or install_mngr.are_images_installed()) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + 
chdir(input_params.build_dir) + + test_suite = [ + test_imagerepo_update_after_metadata_download_failure, + test_imagerepo_update_after_image_download_failure, + test_imagerepo_unsuccessful_download, + test_incorrect_targets_logs, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_install_aktualizr_and_update.sh b/tests/test_install_aktualizr_and_update.sh index 2ca79ca47e..8d28665f97 100755 --- a/tests/test_install_aktualizr_and_update.sh +++ b/tests/test_install_aktualizr_and_update.sh @@ -8,7 +8,7 @@ TEST_INSTALL_DESTDIR=$(mktemp -d)/install PORT=$("$SCRIPT_DIR/find_listening_port.sh" $!) trap 'kill %1' EXIT -until curl 127.0.0.1:"$PORT" &> /dev/null; do +until curl localhost:"$PORT" &> /dev/null; do sleep 0.2 done @@ -16,22 +16,32 @@ TEMP_DIR=$(mktemp -d) echo "update" >> "$TEMP_DIR/testupdate_2.0" -$1/src/aktualizr_primary/aktualizr --version +"$1"/src/aktualizr_primary/aktualizr --version -$1/src/uptane_generator/uptane-generator generate --path "$TEST_INSTALL_DESTDIR/fake_root" -$1/src/uptane_generator/uptane-generator image --path "$TEST_INSTALL_DESTDIR/fake_root" --targetname testupdate_2.0 --filename "$TEMP_DIR/testupdate_2.0" --hwid testupdate-device -$1/src/uptane_generator/uptane-generator addtarget --path "$TEST_INSTALL_DESTDIR/fake_root" --targetname testupdate_2.0 --hwid testupdate-device --serial 723f79763eda1c753ce565c16862c79acdde32eb922d6662f088083c51ffde66 -$1/src/uptane_generator/uptane-generator signtargets --path "$TEST_INSTALL_DESTDIR/fake_root" +"$1"/src/uptane_generator/uptane-generator generate --path "$TEST_INSTALL_DESTDIR/fake_root" +"$1"/src/uptane_generator/uptane-generator image --path "$TEST_INSTALL_DESTDIR/fake_root" --targetname testupdate_2.0 --filename "$TEMP_DIR/testupdate_2.0" --hwid testupdate-device +"$1"/src/uptane_generator/uptane-generator addtarget --path "$TEST_INSTALL_DESTDIR/fake_root" 
--targetname testupdate_2.0 --hwid testupdate-device --serial 723f79763eda1c753ce565c16862c79acdde32eb922d6662f088083c51ffde66 +"$1"/src/uptane_generator/uptane-generator signtargets --path "$TEST_INSTALL_DESTDIR/fake_root" +# shellcheck disable=SC2174 mkdir -m 700 -p "$TEMP_DIR/import" cp ./tests/test_data/prov_testupdate/* "$TEMP_DIR/import" -echo -e "[storage]\\npath = \"$TEMP_DIR\"\\n[import]\\nbase_path = \"$TEMP_DIR/import\"" > "$TEMP_DIR/conf.toml" -echo -e "[tls]\\nserver = \"http://localhost:$PORT\"" >> "$TEMP_DIR/conf.toml" -$1/src/aktualizr_primary/aktualizr -c ./tests/config/testupdate.toml -c "$TEMP_DIR/conf.toml" once +cat << EOF > "$TEMP_DIR/conf.toml" +[storage] +path = "$TEMP_DIR" +[import] +base_path = "$TEMP_DIR/import" +[tls] +server = "http://localhost:$PORT" +[pacman] +images_path = "$TEMP_DIR/images" +EOF + +"$1"/src/aktualizr_primary/aktualizr -c ./tests/config/testupdate.toml -c "$TEMP_DIR/conf.toml" once # check the updated file appeared in the installation directory and sha256sum matches expectation -filename_lower=$($1/src/aktualizr_info/aktualizr-info -c ./tests/config/testupdate.toml -c "$TEMP_DIR/conf.toml" --director-target | jq '(.signed.targets["testupdate_2.0"].hashes.sha256)') -filename=$(echo "$filename_lower" | tr [a-f] [A-F] | tr -d '"') +filename_lower=$("$1"/src/aktualizr_info/aktualizr-info -c ./tests/config/testupdate.toml -c "$TEMP_DIR/conf.toml" --director-target | jq '(.signed.targets["testupdate_2.0"].hashes.sha256)') +filename=$(echo "$filename_lower" | tr "a-f" "A-F" | tr -d '"') if [ ! -f "$TEMP_DIR/images/$filename" ];then echo "ERROR: $filename does not exist or sha256sum does not match." 
exit 1 diff --git a/tests/test_io_failure.py b/tests/test_io_failure.py index 760a1f6e69..58c9733f40 100755 --- a/tests/test_io_failure.py +++ b/tests/test_io_failure.py @@ -95,7 +95,7 @@ def main(): FakeTestServerBackground(repo_dir, srcdir=srcdir) as uptane_server, \ multiprocessing.Pool(args.jobs) as pool: - server = f'http://127.0.0.1:{uptane_server.port}' + server = f'http://localhost:{uptane_server.port}' print(f'Running tests on {server} (repo directory: {repo_dir})') if args.serve_only: diff --git a/tests/test_misc_ostree_update.py b/tests/test_misc_ostree_update.py new file mode 100755 index 0000000000..3093644e69 --- /dev/null +++ b/tests/test_misc_ostree_update.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 + +import logging +import argparse +import time + +from os import getcwd, chdir + +from test_fixtures import KeyStore, with_aktualizr, with_uptane_backend, with_secondary, with_director, with_imagerepo,\ + with_sysroot, with_treehub, TestRunner + + +logger = logging.getLogger(__file__) + + +""" + Test update of Primary and Secondary if their package manager differs, `ostree` + and binary (`none` or `fake`) respectively + + Aktualizr/Primary's package manager is set to `ostree` + Secondary's package manager is set to `fake` which means a file/binary update + Primary goal is to verify whether aktualizr succeeds with a binary/fake update of Secondary + while aktualizr/Primary is configured with OSTree package manager +""" +@with_uptane_backend(start_generic_server=True) +@with_secondary(start=True) +@with_director() +@with_treehub() +@with_sysroot() +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_primary_ostree_secondary_file_updates(uptane_repo, secondary, aktualizr, director, sysroot, + treehub, **kwargs): + target_rev = treehub.revision + # add an OSTree update for Primary + uptane_repo.add_ostree_target(aktualizr.id, target_rev) + # add a fake/binary update for Secondary + secondary_update_hash = 
uptane_repo.add_image(secondary.id, "secondary-update.bin") + + with aktualizr: + aktualizr.wait_for_completion() + + # check the Primary update, must be in pending state since it requires reboot + pending_rev = aktualizr.get_primary_pending_version() + if pending_rev != target_rev: + logger.error("Pending version {} != the target version {}".format(pending_rev, target_rev)) + return False + + # check the Secondary update + current_secondary_image_hash = aktualizr.get_current_image_info(secondary.id) + if current_secondary_image_hash != secondary_update_hash: + logger.error("Current Secondary image {} != expected image {}".format(current_secondary_image_hash, + secondary_update_hash)) + return False + + # emulate reboot and run aktualizr once more + sysroot.update_revision(pending_rev) + aktualizr.emulate_reboot() + + with aktualizr: + aktualizr.wait_for_completion() + + # check the Primary update after reboot + result = director.get_install_result() and (target_rev == aktualizr.get_current_primary_image_info()) + return result + + +""" + Test update of Secondary's OSTree repo if an OSTree target metadata are expired + + Metadata are valid at the moment of a new OSTree revision installation, + but are expired after that and before Secondary is rebooted, + we still expect that the installed update is applied in this case +""" +@with_uptane_backend() +@with_director() +@with_treehub() +@with_sysroot() +@with_secondary(start=False) +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_secondary_ostree_update_if_metadata_expires(uptane_repo, secondary, aktualizr, director, sysroot, treehub, **kwargs): + target_rev = treehub.revision + expires_within_sec = 10 + + # add an OSTree update for Secondary + uptane_repo.add_ostree_target(secondary.id, target_rev, expires_within_sec=expires_within_sec) + start_time = time.time() + + with secondary: + with aktualizr: + aktualizr.wait_for_completion() + + # check the Primary update, must be in pending state 
since it requires reboot + pending_rev = aktualizr.get_current_pending_image_info(secondary.id) + if pending_rev != target_rev: + logger.error("Pending version {} != the target version {}".format(pending_rev, target_rev)) + return False + + # wait until the target metadata are expired + time.sleep(max(0, expires_within_sec - (time.time() - start_time))) + + # emulate reboot and run aktualizr once more + sysroot.update_revision(pending_rev) + secondary.emulate_reboot() + + with secondary: + # Wait for Secondary to initialize. wait_for_completion won't work; it + # times out. + time.sleep(5) + with aktualizr: + aktualizr.wait_for_completion() + + # check the Secondary update after reboot + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + installed_rev = aktualizr.get_current_image_info(secondary.id) + if installed_rev != target_rev: + logger.error("Installed version {} != the target version {}".format(installed_rev, target_rev)) + return False + + return True + + +""" + Test update of Primary's OSTree repo if an OSTree target metadata are expired + + Metadata are valid at the moment of a new OSTree revision installation, + but are expired after that and before Primary is rebooted, + we still expect that the installed update is applied in this case +""" +@with_uptane_backend() +@with_director() +@with_treehub() +@with_sysroot() +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_primary_ostree_update_if_metadata_expires(uptane_repo, aktualizr, director, sysroot, treehub, **kwargs): + target_rev = treehub.revision + expires_within_sec = 10 + + # add an OSTree update for Primary + uptane_repo.add_ostree_target(aktualizr.id, target_rev, expires_within_sec=expires_within_sec) + start_time = time.time() + + with aktualizr: + aktualizr.wait_for_completion() + + # check the Primary update, must be in pending state since it requires reboot + pending_rev = 
aktualizr.get_primary_pending_version() + if pending_rev != target_rev: + logger.error("Pending version {} != the target version {}".format(pending_rev, target_rev)) + return False + + # wait until the target metadata are expired + time.sleep(max(0, expires_within_sec - (time.time() - start_time))) + + # emulate reboot and run aktualizr once more + sysroot.update_revision(pending_rev) + aktualizr.emulate_reboot() + + with aktualizr: + aktualizr.wait_for_completion() + + # check the Primary update after reboot + if not director.get_install_result(): + logger.error("Installation result is not successful") + return False + + installed_rev = aktualizr.get_current_primary_image_info() + if installed_rev != target_rev: + logger.error("Installed version {} != the target version {}".format(installed_rev, target_rev)) + return False + + return True + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_primary_ostree_secondary_file_updates, + test_secondary_ostree_update_if_metadata_expires, + test_primary_ostree_update_if_metadata_expires + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_ostree_custom_uri.py b/tests/test_ostree_custom_uri.py new file mode 100755 index 0000000000..9ab509e5ff --- /dev/null +++ b/tests/test_ostree_custom_uri.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +import logging +import argparse +from functools import wraps + +from os import getcwd, chdir + +from test_fixtures import KeyStore, with_aktualizr, 
with_uptane_backend, with_director, with_imagerepo, \ + with_sysroot, TestRunner, Treehub + + +logger = logging.getLogger(__file__) + + +class CustomUriTreehub(Treehub): + prefix = '/mycustomuri' + + def __init__(self, ifc, port, client_handler_map={}): + self.hits_with_prefix = 0 + super(CustomUriTreehub, self).__init__(ifc=ifc, port=port, client_handler_map=client_handler_map) + + class Handler(Treehub.Handler): + def default_get(self): + if self.path.startswith(CustomUriTreehub.prefix): + self.server.hits_with_prefix += 1 + self.path = self.path[len(CustomUriTreehub.prefix):] + + super(CustomUriTreehub.Handler, self).default_get() + + +def with_custom_treehub_uri(handlers=[], port=0): + def decorator(test): + @wraps(test) + def wrapper(*args, **kwargs): + def func(handler_map={}): + with CustomUriTreehub('localhost', port=port, client_handler_map=handler_map) as treehub: + return test(*args, **kwargs, treehub=treehub) + + if handlers and len(handlers) > 0: + for handler in handlers: + result = func(handler.map(kwargs.get('test_path', ''))) + if not result: + break + else: + result = func() + return result + return wrapper + return decorator + + +""" +Test setting a custom URI in OSTree +""" +@with_uptane_backend(start_generic_server=True) +@with_director() +@with_custom_treehub_uri() +@with_sysroot() +@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_ostree_custom_uri(uptane_repo, aktualizr, director, sysroot, + treehub, **kwargs): + target_rev = treehub.revision + # add an OSTree update with a custom URI + uptane_repo.add_ostree_target(aktualizr.id, target_rev, target_uri=treehub.base_url + '/mycustomuri') + + with aktualizr: + aktualizr.wait_for_completion() + + if treehub.hits_with_prefix <= 5: + logger.error("Didn't fetch from custom URI. 
Got %d hits with %s prefix", + treehub.hits_with_prefix, CustomUriTreehub.prefix) + return False + return True + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_ostree_custom_uri, + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_treehub_failure.py b/tests/test_treehub_failure.py new file mode 100755 index 0000000000..388a8706e0 --- /dev/null +++ b/tests/test_treehub_failure.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 + +import logging +import argparse + +from os import getcwd, chdir +from test_fixtures import KeyStore, with_uptane_backend, with_path, with_director, with_aktualizr, \ + with_sysroot, with_treehub, DownloadInterruptionHandler, MalformedImageHandler, TestRunner + +logger = logging.getLogger(__file__) + + +""" + Verifies whether aktualizr is updatable after failure of object(s) download from Treehub/OSTree repo + with follow-up successful download. + + Currently, it's tested against two types of object download failure: + - download interruption - object download is interrupted once, after that it's successful + - malformed object - object download is successful but it's malformed. 
It happens once after that it's successful +""" +@with_uptane_backend(start_generic_server=True) +@with_director() +@with_treehub(handlers=[ + DownloadInterruptionHandler(url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez'), + MalformedImageHandler(url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez'), + + # TODO: OSTree object download is not resilient to `Slow Retrieval Attack` + # https://saeljira.it.here.com/browse/OTA-3737 + #SlowRetrievalHandler(url='/objects/6b/1604b586fcbe052bbc0bd9e1c8040f62e085ca2e228f37df957ac939dff361.filez'), + + # TODO: Limit a number of HTTP redirects with OSTree fetches (currently not possible) + #RedirectHandler(number_of_redirects=1000, url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez') +]) +@with_sysroot() +@with_aktualizr(start=False, run_mode='once') +def test_treehub_update_after_image_download_failure(uptane_repo, + aktualizr, + director, + uptane_server, + sysroot, treehub): + target_rev = treehub.revision + uptane_repo.add_ostree_target(aktualizr.id, target_rev) + with aktualizr: + aktualizr.wait_for_completion() + + pending_rev = aktualizr.get_primary_pending_version() + if pending_rev != target_rev: + logger.error("Pending version {} != the target one {}".format(pending_rev, target_rev)) + return False + + sysroot.update_revision(pending_rev) + aktualizr.emulate_reboot() + + with aktualizr: + aktualizr.wait_for_completion() + + result = director.get_install_result() and (target_rev == aktualizr.get_current_primary_image_info()) + return result + + +""" + Verifies that aktualizr does not install an image which contains files with wrong checksums +""" +@with_uptane_backend(start_generic_server=True) +@with_director() +@with_treehub(handlers=[ + MalformedImageHandler(url='/objects/41/5ce9717fc7a5f4d743a4f911e11bd3ed83930e46756303fd13a3eb7ed35892.filez', + number_of_failures=-1, fake_filez=True), + +]) +@with_sysroot() 
+@with_aktualizr(start=False, run_mode='once', output_logs=True) +def test_treehub_update_if_bad_ostree_checksum(uptane_repo, + aktualizr, + director, + uptane_server, + sysroot, treehub): + target_rev = treehub.revision + uptane_repo.add_ostree_target(aktualizr.id, target_rev) + with aktualizr: + aktualizr.wait_for_completion() + + pending_rev = aktualizr.get_primary_pending_version() + if pending_rev == target_rev: + logger.error("Pending version {} == the target one {}".format(pending_rev, target_rev)) + return False + return True + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + parser = argparse.ArgumentParser(description='Test backend failure') + parser.add_argument('-b', '--build-dir', help='build directory', default='build') + parser.add_argument('-s', '--src-dir', help='source directory', default='.') + input_params = parser.parse_args() + + KeyStore.base_dir = input_params.src_dir + initial_cwd = getcwd() + chdir(input_params.build_dir) + + test_suite = [ + test_treehub_update_after_image_download_failure, + test_treehub_update_if_bad_ostree_checksum + ] + + with TestRunner(test_suite) as runner: + test_suite_run_result = runner.run() + + chdir(initial_cwd) + exit(0 if test_suite_run_result else 1) diff --git a/tests/test_update.py b/tests/test_update.py deleted file mode 100755 index ffcfe1328e..0000000000 --- a/tests/test_update.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import argparse - -from os import getcwd, chdir - -from test_fixtures import KeyStore, with_aktualizr, with_uptane_backend, with_secondary, with_director, with_imagerepo,\ - with_sysroot, with_treehub - - -logger = logging.getLogger(__file__) - - -""" - Test update of Primary and Secondary if their package manager differs, `ostree` and `fake` respectively - - Aktualizr/Primary's package manager is set to `ostree` - Secondary's package manager is set to `fake` - Primary goal is to verify whether aktualizr succeeds with a 
binary/fake update of secondary - while aktualizr/primary is configured with ostree package manager -""" -@with_uptane_backend(start_generic_server=True) -@with_director() -@with_treehub() -@with_sysroot() -@with_secondary(start=True) -@with_aktualizr(start=False, run_mode='once', output_logs=True) -def test_primary_ostree_secondary_fake_updates(uptane_repo, secondary, aktualizr, director, - uptane_server, sysroot, treehub): - target_rev = treehub.revision - # add an ostree update for Primary - uptane_repo.add_ostree_target(aktualizr.id, target_rev) - # add a fake/binary update for Secondary - secondary_update_hash = uptane_repo.add_image(secondary.id, "secondary-update.bin") - - with aktualizr: - aktualizr.wait_for_completion() - - # check the Primary update, must be in pending state since it requires reboot - pending_rev = aktualizr.get_primary_pending_version() - if pending_rev != target_rev: - logger.error("Pending version {} != the target one {}".format(pending_rev, target_rev)) - return False - - # check the Secondary update - current_secondary_image_hash = aktualizr.get_current_image_info(secondary.id) - if current_secondary_image_hash != secondary_update_hash: - logger.error("Secondary current image {} != {} expected one".format(current_secondary_image_hash, - secondary_update_hash)) - return False - - # emulate reboot and run aktualizr once more - sysroot.update_revision(pending_rev) - aktualizr.emulate_reboot() - - with aktualizr: - aktualizr.wait_for_completion() - - # check the Primary update after reboot - result = director.get_install_result() and (target_rev == aktualizr.get_current_primary_image_info()) - return result - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - - parser = argparse.ArgumentParser(description='Test backend failure') - parser.add_argument('-b', '--build-dir', help='build directory', default='build') - parser.add_argument('-s', '--src-dir', help='source directory', default='.') - - input_params = 
parser.parse_args() - - KeyStore.base_dir = input_params.src_dir - initial_cwd = getcwd() - chdir(input_params.build_dir) - - test_suite = [ - test_primary_ostree_secondary_fake_updates - ] - - test_suite_run_result = True - for test in test_suite: - logger.info('>>> Running {}...'.format(test.__name__)) - test_run_result = test() - logger.info('>>> {}: {}'.format('OK' if test_run_result else 'Failed', test.__name__)) - test_suite_run_result = test_suite_run_result and test_run_result - - chdir(initial_cwd) - exit(0 if test_suite_run_result else 1) - diff --git a/tests/test_utils.cc b/tests/test_utils.cc index 51ea6a01b9..cc2ee2d1c7 100644 --- a/tests/test_utils.cc +++ b/tests/test_utils.cc @@ -9,6 +9,7 @@ #endif #include #include +#include #include #include @@ -18,7 +19,7 @@ #include "logging/logging.h" -std::string TestUtils::getFreePort() { +in_port_t TestUtils::getFreePortAsInt() { int s = socket(AF_INET, SOCK_STREAM, 0); if (s == -1) { std::cout << "socket() failed: " << errno; @@ -41,9 +42,11 @@ std::string TestUtils::getFreePort() { throw std::runtime_error("getsockname failed"); } close(s); - return std::to_string(ntohs(sa.sin_port)); + return sa.sin_port; } +std::string TestUtils::getFreePort() { return std::to_string(ntohs(getFreePortAsInt())); } + void TestUtils::writePathToConfig(const boost::filesystem::path &toml_in, const boost::filesystem::path &toml_out, const boost::filesystem::path &storage_path) { // Append our temp_dir path as storage.path to the config file. 
This is a hack diff --git a/tests/test_utils.h b/tests/test_utils.h index d548924700..8972900a4e 100644 --- a/tests/test_utils.h +++ b/tests/test_utils.h @@ -10,6 +10,7 @@ struct TestUtils { static std::string getFreePort(); + static in_port_t getFreePortAsInt(); static void writePathToConfig(const boost::filesystem::path &toml_in, const boost::filesystem::path &toml_out, const boost::filesystem::path &storage_path); static void waitForServer(const std::string &address); diff --git a/tests/tuf-test-vectors b/tests/tuf-test-vectors index 6b4d2bb8f6..b0fb682ff4 160000 --- a/tests/tuf-test-vectors +++ b/tests/tuf-test-vectors @@ -1 +1 @@ -Subproject commit 6b4d2bb8f6adbb623caf54e50d89e28f231e8ae4 +Subproject commit b0fb682ff4ee140d04873880f4ca5121ae4253f6 diff --git a/tests/tuf/root-with-extra-keys.json b/tests/tuf/root-with-extra-keys.json new file mode 100644 index 0000000000..55f8fe746e --- /dev/null +++ b/tests/tuf/root-with-extra-keys.json @@ -0,0 +1,78 @@ +{ + "signatures" : [ + { + "method" : "rsassa-pss", + "sig" : "VCU/2ReXgCik29oHS64C/8Y8T0w4mMWjHEGQUG+P7Vi0ONXOZiCP0GclBZlqYDouEWFG9BfR9/EwQFLKxWCn6F7OOwDz7aks1Waj/XhtDjFAGflKhEUozCFMYqL5eHIAa9Ml7gvlUj5jav0UOCL4NIaX+a5YMmRQen3X8yMLnCh+y9fzXedN+uauK0LA+PuN4Cy1xDji9tcmsPWio3dXO2InnXSNRbCGIO7cSvdid7pCnMqtT0ifPbwyiYf53F0XOToVtiZYbP5QdhATbPk+CmGmIwC57gEZhWpoQQDETNhcN/9PuTC8s3c/dwVwLm1gqSmbAkiSCFn44tau/wzJIw==", + "keyid" : "a364662762f9194703b38e1354385bfcbcc0d515aa2bb6fd7f29055b2c329c16" + } + ], + "signed" : { + "version" : 1, + "expires" : "2024-07-12T18:25:27Z", + "_type" : "Root", + "keys" : { + "a364662762f9194703b38e1354385bfcbcc0d515aa2bb6fd7f29055b2c329c16" : { + "keytype" : "RSA", + "keyval" : { + "public" : "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0kno6A9+7AhJmrbrDFGS\nzhW1CoCoFeuZdCJBCT9eWY5irCS9quZZgQYCZpYVESetscYmMMxNJ484TgZ+Y1IF\nk8JMN5ViX7rE+08j4yQgWIw7tQ0BetJfoKcCtb0OxgdQeinz8IqZsCFXPOinXOBq\nwoefNwex0qg6kAWCtKEMzPuexWx532S7mInah+lVXIWpAGy80VhSTYUveQebSJph\nOoJLsYZ6NFgKPZLACDpLbtA3sjn5lbc/wZabD/VmFR0DzdLzZxrG0sCD47eTFchh\n/EghNEllDX0bpoou/uw8MYZmIaxVz19YvdHd6uueSY7s4x3XTz2trVVezGg4UMXs\nYwIDAQAB\n-----END PUBLIC KEY-----\n" + } + }, + "d0194abb8d060f8feec3bcdef1e6f3d0a79360861a8829e2fdf08577c23886c6" : { + "keyval" : { + "public" : "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArdn5kLKstcuiHr8elk3I\nDm1NmGWA1msmf2BgjsUmFn4qco2AV60r92So/I1a1iuyDNuYiziQgBy8xw8YaZTR\nAI/TfddqA91egzUG1OMGxTSal4VAIRSBCJzTXVImMjx0tiZXwaBzUSWVqkioab6P\niBqcGlvdSgaAt45fyag+ywZ1LLMAw8FGEsHwyzIIyHd1Go1Vj9dx7wnsGMfULR3K\n3j8aZ0KsZGlQG5bJjdA0hS5KEzROcFCti77Jb0Rnx0dmQ7k74ZE97JuGcdNgC6mX\n4JVc4jwOpeOZ6tbIxyXD+S0F1wz1tt5Q/SHFhl9xAw67ckxxPZjl1+ohFwBES2NC\nyQIDAQAB\n-----END PUBLIC KEY-----\n" + }, + "keytype" : "RSA" + }, + "dd1c125ff349d4010ecc1ee9dc609170aefed0e2e25598c55a6b15dd10ebc1e8" : { + "keyval" : { + "public" : "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0GCQv8G9/a8XU8Bj8Mk/\nUXYvFMQZqp2vHMl6zGFqxg8e9tPKddeZPLFqATTg8GzF+H1bpNUnm5oM6/Y/bu23\nvFesj3NbbC3aXAv7A4qHDdiwEk/mhnZIwEjkLxkEnoApe/blmU1R5A482iZBU/77\nTnlNDDvKmFbk+3HUrQcbJyx7YKyx3ojj016rSbHB+cBuv1tMGtvyO5gnU47YaPtk\nfB2/RIDcpYbXd6FXHzB6ozbyn+yv4RKb33hnx8flGeX9duVn3Doi5Bhtk19yAP2I\nKrKlWytQ7OKG5+hC9fW9XhGcbDMHHg2g6FEdIfOuuzl3Gf4khI8TBU25u2k7BIHB\nMwIDAQAB\n-----END PUBLIC KEY-----\n" + }, + "keytype" : "RSA" + }, + "bd66606a0cfac1da915a24265d7b0813d7542b0e7969b24d4e01da33b57cb4b4" : { + "keyval" : { + "public" : "-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtU2X7bwUcGtN63pEXVoq\n+ghbyqofMmSqeAnaBRjErZcUiSp2pi6QLnlNxz+xo2HmsrsMJZLIx14KGlRI8TyY\nXMur4uk6L2gOCxlIsA9xCS0OvwFgomDyn5E1Z7Pf4CYSxwTsYZ3IvAy1NmS8gTbo\nE8Zc5pQH6BpiHJZmhHoHaJXkk+yZzPSNiNEB6fK+7d32pztUgmd+qYUrlY+v1ray\nuvW7rafXW+7OROJlAtFXvzl0S5nUotBg52cvomJHVgYqkM2tPrUYD+ztEdV9iugF\nu4tnjez1I19cINyU6dR+a0ovP+S/zIsOent59tGo0Jql6GgjDjl6KoiE9XzlYa5F\nyQIDAQAB\n-----END PUBLIC KEY-----\n" + }, + "keytype" : "RSA" + }, + "b82160f2f756d876797fb95c29b5e19aa2c6a54fa7d50e83c30911fae779ca21" : { + "keyval" : { + "public" : "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnY8d45f3VdyeomMTikok\n/u6EFlH7I3Ut6pm1RNY3/aPsRPhWbN/kVOUDKqcQ5j+4Hlea5Sk+JP26Jhe+JlKf\n/vHMZm4yCIQTKF6yohL7z3RuT6Zwmx+e+5yrkFtqoi7bs4deBA4NoxzdMxxwD9g/\nikudErpoinfPNftNRTOX7th06D7dXi97jDv8tj0ujN7gmyAT2kBlifUdtXUYcSH1\n9CYX4nUHJpLmSkjWi5G4eYHwpnFPDankhljFEiP8rr0Zok/fQI57k3ISj6ljpcj5\nEi1q4KWnEIn5rrE9YelDUP4axWieSEKYEbgarnmKUX/5x/KaLgrMYf8qWiyPZ779\n6QIDAQAB\n-----END PUBLIC KEY-----\n" + }, + "keytype" : "RSA" + } + }, + "roles" : { + "offline" : { + "threshold" : 1, + "keyids" : [ + "dd1c125ff349d4010ecc1ee9dc609170aefed0e2e25598c55a6b15dd10ebc1e8" + ] + }, + "root" : { + "threshold" : 1, + "keyids" : [ + "a364662762f9194703b38e1354385bfcbcc0d515aa2bb6fd7f29055b2c329c16" + ] + }, + "targets" : { + "threshold" : 1, + "keyids" : [ + "bd66606a0cfac1da915a24265d7b0813d7542b0e7969b24d4e01da33b57cb4b4" + ] + }, + "timestamp" : { + "keyids" : [ + "b82160f2f756d876797fb95c29b5e19aa2c6a54fa7d50e83c30911fae779ca21" + ], + "threshold" : 1 + }, + "snapshot" : { + "threshold" : 1, + "keyids" : [ + "d0194abb8d060f8feec3bcdef1e6f3d0a79360861a8829e2fdf08577c23886c6" + ] + } + } + } +} diff --git a/tests/uptane_repo_generation/CMakeLists.txt b/tests/uptane_repo_generation/CMakeLists.txt index 11bdaa4c41..197c73343f 100644 --- a/tests/uptane_repo_generation/CMakeLists.txt +++ b/tests/uptane_repo_generation/CMakeLists.txt @@ -1,4 +1,4 @@ 
add_custom_target(uptane_repo_full_no_correlation_id - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/full_no_correlation_id.sh - ${UPTANE_GENERATOR} ${PROJECT_BINARY_DIR}/uptane_repos/full_no_correlation_id) + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/generate_repo.sh + ${UPTANE_GENERATOR} ${PROJECT_BINARY_DIR}/uptane_repos/full_no_correlation_id --add_default_secondary) add_dependencies(uptane_repo_full_no_correlation_id uptane-generator) diff --git a/tests/uptane_repo_generation/delegation_basic.sh b/tests/uptane_repo_generation/delegation_basic.sh index 297dcb0174..918e4d437d 100755 --- a/tests/uptane_repo_generation/delegation_basic.sh +++ b/tests/uptane_repo_generation/delegation_basic.sh @@ -30,11 +30,12 @@ echo "secondary" > "$SECONDARY_FIRMWARE" if [[ "$REVOKE" = "revoke" ]]; then uptane_gen --command revokedelegation --dname new-role else - uptane_gen --command generate --expires 2021-07-04T16:33:27Z + uptane_gen --command generate --expires 2025-07-04T16:33:27Z uptane_gen --command adddelegation --dname new-role --dpattern "abc/*" --keytype ed25519 uptane_gen --command image --filename "$PRIMARY_FIRMWARE" --targetname primary.txt --hwid primary_hw uptane_gen --command image --filename "$SECONDARY_FIRMWARE" --targetname "abc/secondary.txt" --dname new-role --hwid secondary_hw uptane_gen --command addtarget --hwid primary_hw --serial CA:FE:A6:D2:84:9D --targetname primary.txt - uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial --targetname "abc/secondary.txt" + # TODO: implement delegation support in ManagedSecondary. 
+ #uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial --targetname "abc/secondary.txt" uptane_gen --command signtargets fi diff --git a/tests/uptane_repo_generation/delegation_nested.sh b/tests/uptane_repo_generation/delegation_nested.sh index f471b8a42b..d3d8318a62 100755 --- a/tests/uptane_repo_generation/delegation_nested.sh +++ b/tests/uptane_repo_generation/delegation_nested.sh @@ -15,7 +15,7 @@ fi uptane_gen() { - echo "$UPTANE_GENERATOR --path $DEST_DIR $@" + echo "$UPTANE_GENERATOR --path $DEST_DIR" "$@" "$UPTANE_GENERATOR" --path "$DEST_DIR" "$@" } @@ -33,7 +33,7 @@ if [[ "$REVOKE" = "revoke" ]]; then uptane_gen --command revokedelegation --dname role-abc else echo "NORMAL" - uptane_gen --command generate --expires 2021-07-04T16:33:27Z + uptane_gen --command generate --expires 2025-07-04T16:33:27Z uptane_gen --command adddelegation --dname delegation-top --dpattern "ab*" uptane_gen --command adddelegation --dname role-abc --dpattern "abc/*" --dparent delegation-top uptane_gen --command adddelegation --dname role-bcd --dpattern "bcd/*" --dparent delegation-top @@ -52,6 +52,7 @@ else uptane_gen --command image --targetname "cde/target1" --dname role-cde --targetsha256 40c1fb5a90ea02744126187dc8372f9a289c59f1af4afd9855fd2285f9648bb3 --targetsha512 671718e0c9025135aba25bca6b794920cee047a8031e1f955d5c4d82072422467af5d367243f4113d1b9ca79321091f738e68f27f136f633a5fc9cd6f430c689 --targetlength 100 --hwid secondary_hw uptane_gen --command image --targetname "def/target0" --dname role-def --targetsha256 40c1fb5a90ea02744126187dc8372f9a289c59f1af4afd9855fd2285f9648bb3 --targetsha512 671718e0c9025135aba25bca6b794920cee047a8031e1f955d5c4d82072422467af5d367243f4113d1b9ca79321091f738e68f27f136f633a5fc9cd6f430c689 --targetlength 100 --hwid secondary_hw uptane_gen --command addtarget --hwid primary_hw --serial CA:FE:A6:D2:84:9D --targetname primary.txt - uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial 
--targetname "abc/secondary.txt" + # TODO: implement delegation support in ManagedSecondary. + #uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial --targetname "abc/secondary.txt" uptane_gen --command signtargets fi diff --git a/tests/uptane_repo_generation/full_no_correlation_id.sh b/tests/uptane_repo_generation/full_no_correlation_id.sh deleted file mode 100755 index 161d0750ad..0000000000 --- a/tests/uptane_repo_generation/full_no_correlation_id.sh +++ /dev/null @@ -1,36 +0,0 @@ -#! /bin/bash -set -eEuo pipefail - -if [ "$#" -ne 2 ]; then - echo "Usage: $0 " - exit 1 -fi - -UPTANE_GENERATOR="$1" -DEST_DIR="$2" - -uptane_gen() { - "$UPTANE_GENERATOR" --path "$DEST_DIR" "$@" -} - -if [ -d "$DEST_DIR" ]; then - # already here, bailing - exit 0 -fi - -mkdir -p "$DEST_DIR" -trap 'rm -rf "$DEST_DIR"' ERR - -IMAGES=$(mktemp -d) -trap 'rm -rf "$IMAGES"' exit -PRIMARY_FIRMWARE="$IMAGES/primary.txt" -echo "primary" > "$PRIMARY_FIRMWARE" -SECONDARY_FIRMWARE="$IMAGES/secondary.txt" -echo "secondary" > "$SECONDARY_FIRMWARE" - -uptane_gen --command generate --expires 2021-07-04T16:33:27Z -uptane_gen --command image --filename "$PRIMARY_FIRMWARE" --targetname primary.txt --hwid primary_hw -uptane_gen --command image --filename "$SECONDARY_FIRMWARE" --targetname secondary.txt --hwid secondary_hw -uptane_gen --command addtarget --hwid primary_hw --serial CA:FE:A6:D2:84:9D --targetname primary.txt -uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial --targetname secondary.txt -uptane_gen --command signtargets diff --git a/tests/uptane_repo_generation/generate_repo.sh b/tests/uptane_repo_generation/generate_repo.sh new file mode 100755 index 0000000000..f15ff0880d --- /dev/null +++ b/tests/uptane_repo_generation/generate_repo.sh @@ -0,0 +1,62 @@ +#! 
/bin/bash +set -eEuo pipefail + +if [ "$#" -eq 0 ]; then + echo "Usage: $0 [options]" + echo "Options:" + echo " -s|--add_default_secondary" + echo " -c|--add_campaigns" + exit 1 +fi + +ADD_DEFAULT_SECONDARY=false +ADD_CAMPAIGNS=false + +for arg in "$@" +do + case $arg in + -s|--add_default_secondary) + ADD_DEFAULT_SECONDARY=true + ;; + -c|--add_campaigns) + ADD_CAMPAIGNS=true + ;; + esac +done + +UPTANE_GENERATOR="$1" +DEST_DIR="$2" + +uptane_gen() { + "$UPTANE_GENERATOR" --path "$DEST_DIR" "$@" +} + +if [ -d "$DEST_DIR" ]; then + # already here, bailing + exit 0 +fi + +mkdir -p "$DEST_DIR" +trap 'rm -rf "$DEST_DIR"' ERR + +IMAGES=$(mktemp -d) +trap 'rm -rf "$IMAGES"' exit +PRIMARY_FIRMWARE="$IMAGES/primary.txt" +echo "primary" > "$PRIMARY_FIRMWARE" + +uptane_gen --command generate --expires 2025-07-04T16:33:27Z +uptane_gen --command image --filename "$PRIMARY_FIRMWARE" --targetname primary.txt --hwid primary_hw +uptane_gen --command addtarget --hwid primary_hw --serial CA:FE:A6:D2:84:9D --targetname primary.txt + +if [ "$ADD_DEFAULT_SECONDARY" = true ]; then + SECONDARY_FIRMWARE="$IMAGES/secondary.txt" + echo "secondary" > "$SECONDARY_FIRMWARE" + uptane_gen --command image --filename "$SECONDARY_FIRMWARE" --targetname secondary.txt --hwid secondary_hw + uptane_gen --command addtarget --hwid secondary_hw --serial secondary_ecu_serial --targetname secondary.txt +fi + +uptane_gen --command signtargets + +if [ "$ADD_CAMPAIGNS" = true ]; then + uptane_gen --command addcampaigns +fi diff --git a/tests/uptane_test_common.h b/tests/uptane_test_common.h index 77d26e449f..7cd3f5fb3e 100644 --- a/tests/uptane_test_common.h +++ b/tests/uptane_test_common.h @@ -5,27 +5,18 @@ #include #include "json/json.h" +#include -#include "config/config.h" +#include "libaktualizr/config.h" +#include "libaktualizr/aktualizr.h" #include "uptane/tuf.h" #include "utilities/utils.h" #include "virtualsecondary.h" #include "primary/sotauptaneclient.h" -#include "primary/aktualizr.h" static const 
char* sec_public_key = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyjUeAzozBEccaGFAJ2Q3\n9QBfItH5i5O7yLRjZlKcEnWnFsxAWHUn5W/msRgZN/pXUrlax0wvrvMvHHLwZA2J\nz+UQApzSqj53HPVAcCH6kB9x0r9PM/0vVTKtmcrdSHj7jJ2yAW2T4Vo/eKlpvz3w\n9kTPAj0j1f5LvUgX5VIjUnsQK1LGzMwnleHk2dkWeWnt3OqomnO7V5C0jkDi58tG\nJ6fnyCYWcMUbpMaldXVXqmQ+iBkWxBjZ99+XJSRjdsskC7x8u8t+sA146VDB977r\nN8D+i+P1tAe810crciUqpYNenDYx47aAm6gaDWr7oeDzp3HyCjx4dZi9Z85rVE36\n8wIDAQAB\n-----END PUBLIC KEY-----\n"; static const char* sec_private_key = "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAyjUeAzozBEccaGFAJ2Q39QBfItH5i5O7yLRjZlKcEnWnFsxA\nWHUn5W/msRgZN/pXUrlax0wvrvMvHHLwZA2Jz+UQApzSqj53HPVAcCH6kB9x0r9P\nM/0vVTKtmcrdSHj7jJ2yAW2T4Vo/eKlpvz3w9kTPAj0j1f5LvUgX5VIjUnsQK1LG\nzMwnleHk2dkWeWnt3OqomnO7V5C0jkDi58tGJ6fnyCYWcMUbpMaldXVXqmQ+iBkW\nxBjZ99+XJSRjdsskC7x8u8t+sA146VDB977rN8D+i+P1tAe810crciUqpYNenDYx\n47aAm6gaDWr7oeDzp3HyCjx4dZi9Z85rVE368wIDAQABAoIBAA0WlxS6Zab3O11+\nPfrOv9h5566HTNG+BD+ffXeYDUYcm24cVmXjX2u4bIQ1/RvkdlaCbN/NjKCUWQ5M\nWkb/oVX1i62/nNssI+WZ8kvPxzog7usnOucwkim/mAEGYoBYZF/brTPudc32W3lh\n7dhVGA24snWAo5ssVJax3eoYAPVLqFK5Pb8VUxpHtjERMDDUxM3w6WGXLxuBdA5s\n5vIdv+XrdiQhdPn1HMYEBBInkkYK8w4UytOCAS1/3xfVi2QwX5H9bHkduFpjLSQt\n2StWR9Kh4I80xXp7FwGpfkdUn+3qj5WwneuGY/JnD7AzjDlAThj0AE9iaYjkzXKJ\nVD4ULmECgYEA+UGQ1aglftFuTO427Xmi7tHhooo9U1pKMrg5CkCLkA+MudFzMEgj\npRtDdj8lTTWHEIYQXo5hhZfhk63j89RAKRz1MDFOvgknE8yJa9rSyCAEcwzRzXcY\n3WtWozEZ+5u4KYFHhGjJCSqVFdwyXmjP9ldb35Uxh06OuTbdNkSbiUsCgYEAz62t\nJ1EftTkd/YA/9Poq1deil5g0btPXnMJMj7C99dexNAXuVhS10Rz1Hi74wCFEbkcV\nGL/8U80pER9YYYeFUmqs1pYu7zwcYBT+iNrvFaPifid8FqlJEJ727swnWdpzXpwv\n/6q0h3JXU2odrEMNaGqiPycHQ/45EWMbCtpSs/kCgYEAwjMgWicA17bqvkuXRhzQ\nIkwqBU65ixi82JmJ73/sfNhwp1IV8hcylnAQdq+qK2a6Ddi2JkW+m6yDF2GTSiUj\nvCSQr/SqygsthBKHOx4pvbycWtsxF2lkWRdJUCpweQWRTd0o0HQntdmUgIyoPcBh\nzyevMBr4lNhTAOFLJv37RNMCgYAQq+ODjXqbJKuopvv7YX3Azt+phbln0C+10M8u\nlcSaEKeUAongdScnU0jGFIU5fzIsHB6wbvEFlSmfy0FgCu4D8LZRP5si71Njzyuj\ntteMiCxtbiQC+bH42JoAD3l1OBkc1jLwNjbpzJ7//jvFkVhpMm413Z8y
sRzJrYgF\nNgN/mQKBgQDNT2nFoqanlQPkZekqNQNcVMHPyOWP40z4HC5JD1Z5F18Kg3El4EdS\nNfwaFGRT5qiFJBmmzl+6EFmUrrBNtV01zQ6rO+xgy2Y7qUQMNAUMjh1cCpWwUlN0\ng4aT/RawS5WpWN3+lEs4Ouxpgg4ZStXNZRJkSDHwZpkXtFfKzsEXaA==\n-----END RSA PRIVATE KEY-----\n"; -std::shared_ptr newTestClient(Config &config_in, - std::shared_ptr storage_in, - std::shared_ptr http_client_in, - std::shared_ptr events_channel_in = nullptr) { - std::shared_ptr bootloader_in = std::make_shared(config_in.bootloader, *storage_in); - std::shared_ptr report_queue_in = std::make_shared(config_in, http_client_in); - return std::make_shared(config_in, storage_in, http_client_in, bootloader_in, - report_queue_in, events_channel_in); -} - struct UptaneTestCommon { class TestAktualizr: public Aktualizr { @@ -53,10 +44,8 @@ struct UptaneTestCommon { TestUptaneClient(Config &config_in, std::shared_ptr storage_in, std::shared_ptr http_client, - std::shared_ptr bootloader_in, - std::shared_ptr report_queue_in, - std::shared_ptr events_channel_in = nullptr): - SotaUptaneClient(config_in, storage_in, http_client, bootloader_in, report_queue_in, events_channel_in) { + std::shared_ptr events_channel_in): + SotaUptaneClient(config_in, storage_in, http_client, events_channel_in) { if (boost::filesystem::exists(config_in.uptane.secondary_config_file)) { for (const auto& item : Primary::VirtualSecondaryConfig::create_from_file(config_in.uptane.secondary_config_file)) { @@ -64,36 +53,34 @@ struct UptaneTestCommon { } } } - }; - static std::shared_ptr newTestClient(Config &config_in, - std::shared_ptr storage_in, - std::shared_ptr http_client_in, - std::shared_ptr events_channel_in = nullptr) { - - std::shared_ptr bootloader_in = std::make_shared(config_in.bootloader, *storage_in); - std::shared_ptr report_queue_in = std::make_shared(config_in, http_client_in); + TestUptaneClient(Config &config_in, + std::shared_ptr storage_in, + std::shared_ptr http_client) : TestUptaneClient(config_in, storage_in, http_client, nullptr) {} - return 
std::make_shared(config_in, storage_in, http_client_in, bootloader_in, - report_queue_in, events_channel_in); - } + TestUptaneClient(Config &config_in, + std::shared_ptr storage_in) : TestUptaneClient(config_in, storage_in, std::make_shared()) {} + }; static Primary::VirtualSecondaryConfig addDefaultSecondary(Config& config, const TemporaryDirectory& temp_dir, const std::string& serial, const std::string& hw_id, bool hardcoded_keys = true) { - Primary::VirtualSecondaryConfig ecu_config; + const boost::filesystem::path sec_dir = temp_dir / boost::filesystem::unique_path(); + Utils::createDirectories(sec_dir, S_IRWXU); + Primary::VirtualSecondaryConfig ecu_config; ecu_config.partial_verifying = false; - ecu_config.full_client_dir = temp_dir.Path(); + ecu_config.full_client_dir = sec_dir; ecu_config.ecu_serial = serial; ecu_config.ecu_hardware_id = hw_id; ecu_config.ecu_private_key = "sec.priv"; ecu_config.ecu_public_key = "sec.pub"; - ecu_config.firmware_path = temp_dir / "firmware.txt"; - ecu_config.target_name_path = temp_dir / "firmware_name.txt"; - ecu_config.metadata_path = temp_dir / "secondary_metadata"; + ecu_config.firmware_path = sec_dir / "firmware.txt"; + ecu_config.target_name_path = sec_dir / "firmware_name.txt"; + ecu_config.metadata_path = sec_dir / "secondary_metadata"; - config.uptane.secondary_config_file = temp_dir / boost::filesystem::unique_path() / "virtual_secondary_conf.json"; + // Create or append to the Secondary config file. 
+ config.uptane.secondary_config_file = temp_dir / "virtual_secondary_conf.json"; ecu_config.dump(config.uptane.secondary_config_file); if (hardcoded_keys) { @@ -104,6 +91,28 @@ struct UptaneTestCommon { return ecu_config; } + static Primary::VirtualSecondaryConfig altVirtualConfiguration(const boost::filesystem::path& client_dir) { + const boost::filesystem::path sec_dir = client_dir / boost::filesystem::unique_path(); + Utils::createDirectories(sec_dir, S_IRWXU); + + Primary::VirtualSecondaryConfig ecu_config; + ecu_config.partial_verifying = false; + ecu_config.full_client_dir = sec_dir; + ecu_config.ecu_serial = "ecuserial3"; + ecu_config.ecu_hardware_id = "hw_id3"; + ecu_config.ecu_private_key = "sec.priv"; + ecu_config.ecu_public_key = "sec.pub"; + ecu_config.firmware_path = sec_dir / "firmware.txt"; + ecu_config.target_name_path = sec_dir / "firmware_name.txt"; + ecu_config.metadata_path = sec_dir / "secondary_metadata"; + + // store hard-coded keys to make the tests run WAY faster + Utils::writeFile((ecu_config.full_client_dir / ecu_config.ecu_private_key), std::string(sec_private_key)); + Utils::writeFile((ecu_config.full_client_dir / ecu_config.ecu_public_key), std::string(sec_public_key)); + + return ecu_config; + } + static Config makeTestConfig(const TemporaryDirectory& temp_dir, const std::string& url) { Config conf("tests/config/basic.toml"); conf.uptane.director_server = url + "/director"; @@ -112,6 +121,8 @@ struct UptaneTestCommon { conf.provision.primary_ecu_serial = "CA:FE:A6:D2:84:9D"; conf.provision.primary_ecu_hardware_id = "primary_hw"; conf.storage.path = temp_dir.Path(); + conf.import.base_path = temp_dir.Path() / "import"; + conf.pacman.images_path = temp_dir.Path() / "images"; conf.tls.server = url; conf.bootloader.reboot_sentinel_dir = temp_dir.Path(); UptaneTestCommon::addDefaultSecondary(conf, temp_dir, "secondary_ecu_serial", "secondary_hw"); @@ -128,6 +139,21 @@ struct UptaneTestCommon { packages_to_install.emplace_back(serial, 
ot_json); return packages_to_install; } + + static void verifyEcus(TemporaryDirectory& temp_dir, std::vector expected_ecus) { + const Json::Value ecu_data = Utils::parseJSONFile(temp_dir / "post.json"); + EXPECT_EQ(ecu_data["ecus"].size(), expected_ecus.size()); + for (const Json::Value& ecu : ecu_data["ecus"]) { + auto found = std::find(expected_ecus.begin(), expected_ecus.end(), ecu["ecu_serial"].asString()); + if (found != expected_ecus.end()) { + expected_ecus.erase(found); + } else { + FAIL() << "Unknown ECU in registration data: " << ecu["ecu_serial"].asString(); + } + } + EXPECT_EQ(expected_ecus.size(), 0); + } + }; #endif // UPTANE_TEST_COMMON_H_ diff --git a/tests/uptane_vector_tests.cc b/tests/uptane_vector_tests.cc index 30e53cc28a..81e1f1348b 100644 --- a/tests/uptane_vector_tests.cc +++ b/tests/uptane_vector_tests.cc @@ -1,31 +1,40 @@ #include -#include -#include -#include #include #include #include #include -#include "config/config.h" #include "http/httpclient.h" +#include "libaktualizr/config.h" #include "logging/logging.h" #include "primary/sotauptaneclient.h" #include "storage/invstorage.h" #include "utilities/utils.h" -std::string address; +using std::string; + +string address; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) +string tests_path; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables) class VectorWrapper { public: - VectorWrapper(Json::Value vector) : vector_(std::move(vector)) {} + explicit VectorWrapper(Json::Value vector) : vector_(std::move(vector)) {} bool matchError(const Uptane::Exception& e) { - if (vector_["director"]["update"]["err_msg"].asString() == e.what() || - vector_["director"]["targets"][e.getName()]["err_msg"].asString() == e.what() || - vector_["image_repo"]["update"]["err_msg"].asString() == e.what() || - vector_["image_repo"]["targets"][e.getName()]["err_msg"].asString() == e.what()) { + auto me = [this, &e](const string r) { + if (vector_[r]["update"]["err_msg"].asString() == e.what()) { + 
return true; + } + const Json::Value& targets = vector_[r]["targets"]; + for (Json::Value::const_iterator it = targets.begin(); it != targets.end(); it++) { + if ((*it)["err_msg"].asString() == e.what()) { + return true; + } + } + return false; + }; + if (me("director") || me("image_repo")) { return true; } std::cout << "aktualizr failed with unmatched exception " << typeid(e).name() << ": " << e.what() << "\n"; @@ -64,7 +73,7 @@ class VectorWrapper { std::cout << "exception from image_repo: '" << vector_["image_repo"]["update"]["err"] << " with message: " << vector_["image_repo"]["update"]["err_msg"] << "\n"; } else { - std::cout << "an exception while fetching targets metadata.\n"; + std::cout << "an exception while fetching Targets metadata.\n"; } } @@ -72,7 +81,26 @@ class VectorWrapper { Json::Value vector_; }; -class UptaneVector : public ::testing::TestWithParam {}; +class UptaneVector : public ::testing::TestWithParam {}; + +class HttpWrapper : public HttpClient { + public: + HttpResponse post(const string& url, const string& content_type, const string& data) override { + if (url.find("/devices") != string::npos) { + LOG_TRACE << " HttpWrapper intercepting device registration"; + return {Utils::readFile(tests_path + "/test_data/cred.p12"), 200, CURLE_OK, ""}; + } + + if (url.find("/director/ecus") != string::npos) { + LOG_TRACE << " HttpWrapper intercepting Uptane ECU registration"; + return {"", 200, CURLE_OK, ""}; + } + + LOG_TRACE << "HttpWrapper letting " << url << " pass"; + return HttpClient::post(url, content_type, data); + } + HttpResponse post(const string& url, const Json::Value& data) override { return HttpClient::post(url, data); } +}; /** * Check that aktualizr fails on expired metadata. 
@@ -81,33 +109,39 @@ class UptaneVector : public ::testing::TestWithParam {}; * RecordProperty("zephyr_key", "REQ-153,TST-52"); */ TEST_P(UptaneVector, Test) { - const std::string test_name = GetParam(); + const string test_name = GetParam(); std::cout << "Running test vector " << test_name << "\n"; TemporaryDirectory temp_dir; Config config; config.provision.primary_ecu_serial = "test_primary_ecu_serial"; config.provision.primary_ecu_hardware_id = "test_primary_hardware_id"; + config.provision.provision_path = tests_path + "/test_data/cred.zip"; + config.provision.mode = ProvisionMode::kSharedCredReuse; config.uptane.director_server = address + test_name + "/director"; config.uptane.repo_server = address + test_name + "/image_repo"; config.storage.path = temp_dir.Path(); - config.storage.uptane_metadata_path = BasedPath(temp_dir.Path() / "metadata"); - config.pacman.type = PackageManager::kNone; + config.storage.uptane_metadata_path = utils::BasedPath(temp_dir.Path() / "metadata"); + config.pacman.images_path = temp_dir.Path() / "images"; + config.pacman.type = PACKAGE_MANAGER_NONE; + config.postUpdateValues(); logger_set_threshold(boost::log::trivial::trace); auto storage = INvStorage::newStorage(config.storage); - Uptane::Manifest uptane_manifest{config, storage}; - auto uptane_client = SotaUptaneClient::newDefaultClient(config, storage); - Uptane::EcuSerial ecu_serial(config.provision.primary_ecu_serial); - Uptane::HardwareIdentifier hw_id(config.provision.primary_ecu_hardware_id); - uptane_client->hw_ids.insert(std::make_pair(ecu_serial, hw_id)); + auto http_client = std::make_shared(); + auto uptane_client = std_::make_unique(config, storage, http_client, nullptr); + auto ecu_serial = uptane_client->provisioner_.PrimaryEcuSerial(); + auto hw_id = uptane_client->provisioner_.PrimaryHardwareIdentifier(); + EXPECT_EQ(ecu_serial.ToString(), config.provision.primary_ecu_serial); + EXPECT_EQ(hw_id.ToString(), config.provision.primary_ecu_hardware_id); 
Uptane::EcuMap ecu_map{{ecu_serial, hw_id}}; - Uptane::Target target("test_filename", ecu_map, {{Uptane::Hash::Type::kSha256, "sha256"}}, 1, ""); + Uptane::Target target("test_filename", ecu_map, {{Hash::Type::kSha256, "sha256"}}, 1, ""); storage->saveInstalledVersion(ecu_serial.ToString(), target, InstalledVersionUpdateMode::kCurrent); - HttpClient http_client; + uptane_client->initialize(); + ASSERT_TRUE(uptane_client->attemptProvision()) << "Provisioning Failed. Can't continue test"; while (true) { - HttpResponse response = http_client.post(address + test_name + "/step", Json::Value()); + HttpResponse response = http_client->post(address + test_name + "/step", Json::Value()); if (response.http_status_code == 204) { return; } @@ -118,22 +152,22 @@ TEST_P(UptaneVector, Test) { bool should_fail = vector.shouldFail(); try { - /* Fetch metadata from the director. - * Check metadata from the director. + /* Fetch metadata from the Director. + * Check metadata from the Director. * Identify targets for known ECUs. - * Fetch metadata from the images repo. - * Check metadata from the images repo. + * Fetch metadata from the Image repo. + * Check metadata from the Image repo. * * It would be simpler to just call fetchMeta() here, but that calls * putManifestSimple(), which will fail here. */ - if (!uptane_client->uptaneIteration(nullptr, nullptr)) { - ASSERT_TRUE(should_fail) << "uptaneIteration unexpectedly failed."; - throw uptane_client->getLastException(); - } + uptane_client->uptaneIteration(nullptr, nullptr); + result::UpdateCheck updates = uptane_client->checkUpdates(); if (updates.status == result::UpdateStatus::kError) { ASSERT_TRUE(should_fail) << "checkUpdates unexpectedly failed."; - throw uptane_client->getLastException(); + if (uptane_client->getLastException() != nullptr) { + std::rethrow_exception(uptane_client->getLastException()); + } } if (updates.ecus_count > 0) { /* Download a binary package. 
@@ -141,7 +175,9 @@ TEST_P(UptaneVector, Test) { result::Download result = uptane_client->downloadImages(updates.updates); if (result.status != result::DownloadStatus::kSuccess) { ASSERT_TRUE(should_fail) << "downloadImages unexpectedly failed."; - throw uptane_client->getLastException(); + if (uptane_client->getLastException() != nullptr) { + std::rethrow_exception(uptane_client->getLastException()); + } } } @@ -160,10 +196,10 @@ TEST_P(UptaneVector, Test) { FAIL() << "Step sequence unexpectedly aborted."; } -std::vector GetVectors() { +std::vector GetVectors() { HttpClient http_client; const Json::Value json_vectors = http_client.get(address, HttpInterface::kNoLimit).getJson(); - std::vector vectors; + std::vector vectors; for (Json::ValueConstIterator it = json_vectors.begin(); it != json_vectors.end(); it++) { vectors.emplace_back((*it).asString()); } @@ -176,7 +212,7 @@ int main(int argc, char* argv[]) { logger_init(); logger_set_threshold(boost::log::trivial::trace); - if (argc < 2) { + if (argc < 3) { std::cerr << "This program is intended to be run from run_vector_tests.sh!\n"; return 1; } @@ -184,8 +220,10 @@ int main(int argc, char* argv[]) { /* Use ports to distinguish both the server connection and local storage so * that parallel runs of this code don't cause problems that are difficult to * debug. 
*/ - const std::string port = argv[1]; - address = "http://127.0.0.1:" + port + "/"; + const string port = argv[1]; + address = "http://localhost:" + port + "/"; + + tests_path = argv[2]; ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); diff --git a/third_party/HdrHistogram_c b/third_party/HdrHistogram_c deleted file mode 160000 index 07d8de82df..0000000000 --- a/third_party/HdrHistogram_c +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 07d8de82dfcf64d4a42c85790e0b69656f3834e2 diff --git a/third_party/googletest b/third_party/googletest index 8ffb7e5c88..e2239ee604 160000 --- a/third_party/googletest +++ b/third_party/googletest @@ -1 +1 @@ -Subproject commit 8ffb7e5c88b20a297a2e786c480556467496463b +Subproject commit e2239ee6043f73722e7aa812a459f54a28552929 diff --git a/third_party/isotp-c b/third_party/isotp-c deleted file mode 160000 index c2ef71266e..0000000000 --- a/third_party/isotp-c +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c2ef71266ef2e9aaec15dcf2a39fb354b79391eb diff --git a/third_party/jsoncpp b/third_party/jsoncpp new file mode 160000 index 0000000000..6aba23f4a8 --- /dev/null +++ b/third_party/jsoncpp @@ -0,0 +1 @@ +Subproject commit 6aba23f4a8628d599a9ef7fa4811c4ff6e4070e2 diff --git a/third_party/jsoncpp/json/json-forwards.h b/third_party/jsoncpp/json/json-forwards.h deleted file mode 100644 index 28a6ad7d66..0000000000 --- a/third_party/jsoncpp/json/json-forwards.h +++ /dev/null @@ -1,249 +0,0 @@ -/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/). -/// It is intented to be used with #include -/// This header provides forward declaration for all JsonCpp types. 
- -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - -/* -The JsonCpp library's source code, including accompanying documentation, -tests and demonstration applications, are licensed under the following -conditions... - -The author (Baptiste Lepilleur) explicitly disclaims copyright in all -jurisdictions which recognize such a disclaimer. In such jurisdictions, -this software is released into the Public Domain. - -In jurisdictions which do not recognize Public Domain property (e.g. Germany as of -2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is -released under the terms of the MIT License (see below). - -In jurisdictions which recognize Public Domain property, the user of this -software may choose to accept it either as 1) Public Domain, 2) under the -conditions of the MIT License (see below), or 3) under the terms of dual -Public Domain/MIT License conditions described here, as they choose. - -The MIT License is about as close to Public Domain as a license can get, and is -described in clear, concise terms at: - - http://en.wikipedia.org/wiki/MIT_License - -The full text of the MIT License follows: - -======================================================================== -Copyright (c) 2007-2010 Baptiste Lepilleur - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -======================================================================== -(END LICENSE TEXT) - -The MIT license is compatible with both the GPL and commercial -software, affording one all of the rights of Public Domain with the -minor nuisance of being required to keep the above copyright notice -and license text in the source code. Note also that by accepting the -Public Domain "license" you can re-license your copy using whatever -license you like. - -*/ - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - - - - - -#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED -# define JSON_FORWARD_AMALGATED_H_INCLUDED -/// If defined, indicates that the source file is amalgated -/// to prevent private header inclusion. -#define JSON_IS_AMALGATED - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/config.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSON_CONFIG_H_INCLUDED -# define JSON_CONFIG_H_INCLUDED - -/// If defined, indicates that json library is embedded in CppTL library. 
-//# define JSON_IN_CPPTL 1 - -/// If defined, indicates that json may leverage CppTL library -//# define JSON_USE_CPPTL 1 -/// If defined, indicates that cpptl vector based map should be used instead of std::map -/// as Value container. -//# define JSON_USE_CPPTL_SMALLMAP 1 -/// If defined, indicates that Json specific container should be used -/// (hash table & simple deque container with customizable allocator). -/// THIS FEATURE IS STILL EXPERIMENTAL! There is know bugs: See #3177332 -//# define JSON_VALUE_USE_INTERNAL_MAP 1 -/// Force usage of standard new/malloc based allocator instead of memory pool based allocator. -/// The memory pools allocator used optimization (initializing Value and ValueInternalLink -/// as if it was a POD) that may cause some validation tool to report errors. -/// Only has effects if JSON_VALUE_USE_INTERNAL_MAP is defined. -//# define JSON_USE_SIMPLE_INTERNAL_ALLOCATOR 1 - -/// If defined, indicates that Json use exception to report invalid type manipulation -/// instead of C assert macro. -# define JSON_USE_EXCEPTION 1 - -/// If defined, indicates that the source file is amalgated -/// to prevent private header inclusion. -/// Remarks: it is automatically defined in the generated amalgated header. -// #define JSON_IS_AMALGAMATION - - -# ifdef JSON_IN_CPPTL -# include -# ifndef JSON_USE_CPPTL -# define JSON_USE_CPPTL 1 -# endif -# endif - -# ifdef JSON_IN_CPPTL -# define JSON_API CPPTL_API -# elif defined(JSON_DLL_BUILD) -# define JSON_API __declspec(dllexport) -# elif defined(JSON_DLL) -# define JSON_API __declspec(dllimport) -# else -# define JSON_API -# endif - -// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for integer -// Storages, and 64 bits integer support is disabled. -// #define JSON_NO_INT64 1 - -#if defined(_MSC_VER) && _MSC_VER <= 1200 // MSVC 6 -// Microsoft Visual Studio 6 only support conversion from __int64 to double -// (no conversion from unsigned __int64). 
-#define JSON_USE_INT64_DOUBLE_CONVERSION 1 -#endif // if defined(_MSC_VER) && _MSC_VER < 1200 // MSVC 6 - -#if defined(_MSC_VER) && _MSC_VER >= 1500 // MSVC 2008 -/// Indicates that the following function is deprecated. -# define JSONCPP_DEPRECATED(message) __declspec(deprecated(message)) -#endif - -#if !defined(JSONCPP_DEPRECATED) -# define JSONCPP_DEPRECATED(message) -#endif // if !defined(JSONCPP_DEPRECATED) - -namespace Json { - typedef int Int; - typedef unsigned int UInt; -# if defined(JSON_NO_INT64) - typedef int LargestInt; - typedef unsigned int LargestUInt; -# undef JSON_HAS_INT64 -# else // if defined(JSON_NO_INT64) - // For Microsoft Visual use specific types as long long is not supported -# if defined(_MSC_VER) // Microsoft Visual Studio - typedef __int64 Int64; - typedef unsigned __int64 UInt64; -# else // if defined(_MSC_VER) // Other platforms, use long long - typedef long long int Int64; - typedef unsigned long long int UInt64; -# endif // if defined(_MSC_VER) - typedef Int64 LargestInt; - typedef UInt64 LargestUInt; -# define JSON_HAS_INT64 -# endif // if defined(JSON_NO_INT64) -} // end namespace Json - - -#endif // JSON_CONFIG_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/config.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/forwards.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSON_FORWARDS_H_INCLUDED -# define JSON_FORWARDS_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "config.h" -#endif // if !defined(JSON_IS_AMALGAMATION) - -namespace Json { - - // writer.h - class FastWriter; - class StyledWriter; - - // reader.h - class Reader; - - // features.h - class Features; - - // value.h - typedef unsigned int ArrayIndex; - class StaticString; - class Path; - class PathArgument; - class Value; - class ValueIteratorBase; - class ValueIterator; - class ValueConstIterator; -#ifdef JSON_VALUE_USE_INTERNAL_MAP - class ValueMapAllocator; - class ValueInternalLink; - class ValueInternalArray; - class ValueInternalMap; -#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP - -} // namespace Json - - -#endif // JSON_FORWARDS_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/forwards.h -// ////////////////////////////////////////////////////////////////////// - - - - - -#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED diff --git a/third_party/jsoncpp/json/json.h b/third_party/jsoncpp/json/json.h deleted file mode 100644 index d104f5a401..0000000000 --- a/third_party/jsoncpp/json/json.h +++ /dev/null @@ -1,1855 +0,0 @@ -/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/). -/// It is intented to be used with #include - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - -/* -The JsonCpp library's source code, including accompanying documentation, -tests and demonstration applications, are licensed under the following -conditions... - -The author (Baptiste Lepilleur) explicitly disclaims copyright in all -jurisdictions which recognize such a disclaimer. In such jurisdictions, -this software is released into the Public Domain. 
- -In jurisdictions which do not recognize Public Domain property (e.g. Germany as of -2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is -released under the terms of the MIT License (see below). - -In jurisdictions which recognize Public Domain property, the user of this -software may choose to accept it either as 1) Public Domain, 2) under the -conditions of the MIT License (see below), or 3) under the terms of dual -Public Domain/MIT License conditions described here, as they choose. - -The MIT License is about as close to Public Domain as a license can get, and is -described in clear, concise terms at: - - http://en.wikipedia.org/wiki/MIT_License - -The full text of the MIT License follows: - -======================================================================== -Copyright (c) 2007-2010 Baptiste Lepilleur - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-======================================================================== -(END LICENSE TEXT) - -The MIT license is compatible with both the GPL and commercial -software, affording one all of the rights of Public Domain with the -minor nuisance of being required to keep the above copyright notice -and license text in the source code. Note also that by accepting the -Public Domain "license" you can re-license your copy using whatever -license you like. - -*/ - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - - - - - -#ifndef JSON_AMALGATED_H_INCLUDED -# define JSON_AMALGATED_H_INCLUDED -/// If defined, indicates that the source file is amalgated -/// to prevent private header inclusion. -#define JSON_IS_AMALGATED - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/config.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSON_CONFIG_H_INCLUDED -# define JSON_CONFIG_H_INCLUDED - -/// If defined, indicates that json library is embedded in CppTL library. -//# define JSON_IN_CPPTL 1 - -/// If defined, indicates that json may leverage CppTL library -//# define JSON_USE_CPPTL 1 -/// If defined, indicates that cpptl vector based map should be used instead of std::map -/// as Value container. -//# define JSON_USE_CPPTL_SMALLMAP 1 -/// If defined, indicates that Json specific container should be used -/// (hash table & simple deque container with customizable allocator). -/// THIS FEATURE IS STILL EXPERIMENTAL! 
There is know bugs: See #3177332 -//# define JSON_VALUE_USE_INTERNAL_MAP 1 -/// Force usage of standard new/malloc based allocator instead of memory pool based allocator. -/// The memory pools allocator used optimization (initializing Value and ValueInternalLink -/// as if it was a POD) that may cause some validation tool to report errors. -/// Only has effects if JSON_VALUE_USE_INTERNAL_MAP is defined. -//# define JSON_USE_SIMPLE_INTERNAL_ALLOCATOR 1 - -/// If defined, indicates that Json use exception to report invalid type manipulation -/// instead of C assert macro. -# define JSON_USE_EXCEPTION 1 - -/// If defined, indicates that the source file is amalgated -/// to prevent private header inclusion. -/// Remarks: it is automatically defined in the generated amalgated header. -#define JSON_IS_AMALGAMATION - - -# ifdef JSON_IN_CPPTL -# include -# ifndef JSON_USE_CPPTL -# define JSON_USE_CPPTL 1 -# endif -# endif - -# ifdef JSON_IN_CPPTL -# define JSON_API CPPTL_API -# elif defined(JSON_DLL_BUILD) -# define JSON_API __declspec(dllexport) -# elif defined(JSON_DLL) -# define JSON_API __declspec(dllimport) -# else -# define JSON_API -# endif - -// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for integer -// Storages, and 64 bits integer support is disabled. -// #define JSON_NO_INT64 1 - -#if defined(_MSC_VER) && _MSC_VER <= 1200 // MSVC 6 -// Microsoft Visual Studio 6 only support conversion from __int64 to double -// (no conversion from unsigned __int64). -#define JSON_USE_INT64_DOUBLE_CONVERSION 1 -#endif // if defined(_MSC_VER) && _MSC_VER < 1200 // MSVC 6 - -#if defined(_MSC_VER) && _MSC_VER >= 1500 // MSVC 2008 -/// Indicates that the following function is deprecated. 
-# define JSONCPP_DEPRECATED(message) __declspec(deprecated(message)) -#endif - -#if !defined(JSONCPP_DEPRECATED) -# define JSONCPP_DEPRECATED(message) -#endif // if !defined(JSONCPP_DEPRECATED) - -namespace Json { - typedef int Int; - typedef unsigned int UInt; -# if defined(JSON_NO_INT64) - typedef int LargestInt; - typedef unsigned int LargestUInt; -# undef JSON_HAS_INT64 -# else // if defined(JSON_NO_INT64) - // For Microsoft Visual use specific types as long long is not supported -# if defined(_MSC_VER) // Microsoft Visual Studio - typedef __int64 Int64; - typedef unsigned __int64 UInt64; -# else // if defined(_MSC_VER) // Other platforms, use long long - typedef long long int Int64; - typedef unsigned long long int UInt64; -# endif // if defined(_MSC_VER) - typedef Int64 LargestInt; - typedef UInt64 LargestUInt; -# define JSON_HAS_INT64 -# endif // if defined(JSON_NO_INT64) -} // end namespace Json - - -#endif // JSON_CONFIG_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/config.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/forwards.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSON_FORWARDS_H_INCLUDED -# define JSON_FORWARDS_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "config.h" -#endif // if !defined(JSON_IS_AMALGAMATION) - -namespace Json { - - // writer.h - class FastWriter; - class StyledWriter; - - // reader.h - class Reader; - - // features.h - class Features; - - // value.h - typedef unsigned int ArrayIndex; - class StaticString; - class Path; - class PathArgument; - class Value; - class ValueIteratorBase; - class ValueIterator; - class ValueConstIterator; -#ifdef JSON_VALUE_USE_INTERNAL_MAP - class ValueMapAllocator; - class ValueInternalLink; - class ValueInternalArray; - class ValueInternalMap; -#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP - -} // namespace Json - - -#endif // JSON_FORWARDS_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/forwards.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/features.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef CPPTL_JSON_FEATURES_H_INCLUDED -# define CPPTL_JSON_FEATURES_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "json-forwards.h" -#endif // if !defined(JSON_IS_AMALGAMATION) - -namespace Json { - - /** \brief Configuration passed to reader and writer. - * This configuration object can be used to force the Reader or Writer - * to behave in a standard conforming way. 
- */ - class JSON_API Features - { - public: - /** \brief A configuration that allows all features and assumes all strings are UTF-8. - * - C & C++ comments are allowed - * - Root object can be any JSON value - * - Assumes Value strings are encoded in UTF-8 - */ - static Features all(); - - /** \brief A configuration that is strictly compatible with the JSON specification. - * - Comments are forbidden. - * - Root object must be either an array or an object value. - * - Assumes Value strings are encoded in UTF-8 - */ - static Features strictMode(); - - /** \brief Initialize the configuration like JsonConfig::allFeatures; - */ - Features(); - - /// \c true if comments are allowed. Default: \c true. - bool allowComments_; - - /// \c true if root must be either an array or an object value. Default: \c false. - bool strictRoot_; - }; - -} // namespace Json - -#endif // CPPTL_JSON_FEATURES_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/features.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/value.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef CPPTL_JSON_H_INCLUDED -# define CPPTL_JSON_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "forwards.h" -#endif // if !defined(JSON_IS_AMALGAMATION) -# include -# include - -# ifndef JSON_USE_CPPTL_SMALLMAP -# include -# else -# include -# endif -# ifdef JSON_USE_CPPTL -# include -# endif - -/** \brief JSON (JavaScript Object Notation). 
- */ -namespace Json { - - /** \brief Type of the value held by a Value object. - */ - enum ValueType - { - nullValue = 0, ///< 'null' value - intValue, ///< signed integer value - uintValue, ///< unsigned integer value - realValue, ///< double value - stringValue, ///< UTF-8 string value - booleanValue, ///< bool value - arrayValue, ///< array value (ordered list) - objectValue ///< object value (collection of name/value pairs). - }; - - enum CommentPlacement - { - commentBefore = 0, ///< a comment placed on the line before a value - commentAfterOnSameLine, ///< a comment just after a value on the same line - commentAfter, ///< a comment on the line after a value (only make sense for root value) - numberOfCommentPlacement - }; - -//# ifdef JSON_USE_CPPTL -// typedef CppTL::AnyEnumerator EnumMemberNames; -// typedef CppTL::AnyEnumerator EnumValues; -//# endif - - /** \brief Lightweight wrapper to tag static string. - * - * Value constructor and objectValue member assignement takes advantage of the - * StaticString and avoid the cost of string duplication when storing the - * string or the member name. - * - * Example of usage: - * \code - * Json::Value aValue( StaticString("some text") ); - * Json::Value object; - * static const StaticString code("code"); - * object[code] = 1234; - * \endcode - */ - class JSON_API StaticString - { - public: - explicit StaticString( const char *czstring ) - : str_( czstring ) - { - } - - operator const char *() const - { - return str_; - } - - const char *c_str() const - { - return str_; - } - - private: - const char *str_; - }; - - /** \brief Represents a JSON value. 
- * - * This class is a discriminated union wrapper that can represents a: - * - signed integer [range: Value::minInt - Value::maxInt] - * - unsigned integer (range: 0 - Value::maxUInt) - * - double - * - UTF-8 string - * - boolean - * - 'null' - * - an ordered list of Value - * - collection of name/value pairs (javascript object) - * - * The type of the held value is represented by a #ValueType and - * can be obtained using type(). - * - * values of an #objectValue or #arrayValue can be accessed using operator[]() methods. - * Non const methods will automatically create the a #nullValue element - * if it does not exist. - * The sequence of an #arrayValue will be automatically resize and initialized - * with #nullValue. resize() can be used to enlarge or truncate an #arrayValue. - * - * The get() methods can be used to obtanis default value in the case the required element - * does not exist. - * - * It is possible to iterate over the list of a #objectValue values using - * the getMemberNames() method. - */ - class JSON_API Value - { - friend class ValueIteratorBase; -# ifdef JSON_VALUE_USE_INTERNAL_MAP - friend class ValueInternalLink; - friend class ValueInternalMap; -# endif - public: - typedef std::vector Members; - typedef ValueIterator iterator; - typedef ValueConstIterator const_iterator; - typedef Json::UInt UInt; - typedef Json::Int Int; -# if defined(JSON_HAS_INT64) - typedef Json::UInt64 UInt64; - typedef Json::Int64 Int64; -#endif // defined(JSON_HAS_INT64) - typedef Json::LargestInt LargestInt; - typedef Json::LargestUInt LargestUInt; - typedef Json::ArrayIndex ArrayIndex; - - static const Value null; - /// Minimum signed integer value that can be stored in a Json::Value. - static const LargestInt minLargestInt; - /// Maximum signed integer value that can be stored in a Json::Value. - static const LargestInt maxLargestInt; - /// Maximum unsigned integer value that can be stored in a Json::Value. 
- static const LargestUInt maxLargestUInt; - - /// Minimum signed int value that can be stored in a Json::Value. - static const Int minInt; - /// Maximum signed int value that can be stored in a Json::Value. - static const Int maxInt; - /// Maximum unsigned int value that can be stored in a Json::Value. - static const UInt maxUInt; - - /// Minimum signed 64 bits int value that can be stored in a Json::Value. - static const Int64 minInt64; - /// Maximum signed 64 bits int value that can be stored in a Json::Value. - static const Int64 maxInt64; - /// Maximum unsigned 64 bits int value that can be stored in a Json::Value. - static const UInt64 maxUInt64; - - private: -#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION -# ifndef JSON_VALUE_USE_INTERNAL_MAP - class CZString - { - public: - enum DuplicationPolicy - { - noDuplication = 0, - duplicate, - duplicateOnCopy - }; - CZString( ArrayIndex index ); - CZString( const char *cstr, DuplicationPolicy allocate ); - CZString( const CZString &other ); - ~CZString(); - CZString &operator =( const CZString &other ); - bool operator<( const CZString &other ) const; - bool operator==( const CZString &other ) const; - ArrayIndex index() const; - const char *c_str() const; - bool isStaticString() const; - private: - void swap( CZString &other ); - const char *cstr_; - ArrayIndex index_; - }; - - public: -# ifndef JSON_USE_CPPTL_SMALLMAP - typedef std::map ObjectValues; -# else - typedef CppTL::SmallMap ObjectValues; -# endif // ifndef JSON_USE_CPPTL_SMALLMAP -# endif // ifndef JSON_VALUE_USE_INTERNAL_MAP -#endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - - public: - /** \brief Create a default Value of the given type. - - This is a very useful constructor. - To create an empty array, pass arrayValue. - To create an empty object, pass objectValue. - Another Value can then be set to this one by assignment. - This is useful since clear() and resize() will not alter types. 
- - Examples: - \code - Json::Value null_value; // null - Json::Value arr_value(Json::arrayValue); // [] - Json::Value obj_value(Json::objectValue); // {} - \endcode - */ - Value( ValueType type = nullValue ); - Value( Int value ); - Value( UInt value ); -#if defined(JSON_HAS_INT64) - Value( Int64 value ); - Value( UInt64 value ); -#endif // if defined(JSON_HAS_INT64) - Value( double value ); - Value( const char *value ); - Value( const char *beginValue, const char *endValue ); - /** \brief Constructs a value from a static string. - - * Like other value string constructor but do not duplicate the string for - * internal storage. The given string must remain alive after the call to this - * constructor. - * Example of usage: - * \code - * Json::Value aValue( StaticString("some text") ); - * \endcode - */ - Value( const StaticString &value ); - Value( const std::string &value ); -# ifdef JSON_USE_CPPTL - Value( const CppTL::ConstString &value ); -# endif - Value( bool value ); - Value( const Value &other ); - ~Value(); - - Value &operator=( const Value &other ); - /// Swap values. - /// \note Currently, comments are intentionally not swapped, for - /// both logic and efficiency. 
- void swap( Value &other ); - - ValueType type() const; - - bool operator <( const Value &other ) const; - bool operator <=( const Value &other ) const; - bool operator >=( const Value &other ) const; - bool operator >( const Value &other ) const; - - bool operator ==( const Value &other ) const; - bool operator !=( const Value &other ) const; - - int compare( const Value &other ) const; - - const char *asCString() const; - std::string asString() const; -# ifdef JSON_USE_CPPTL - CppTL::ConstString asConstString() const; -# endif - Int asInt() const; - UInt asUInt() const; - Int64 asInt64() const; - UInt64 asUInt64() const; - LargestInt asLargestInt() const; - LargestUInt asLargestUInt() const; - float asFloat() const; - double asDouble() const; - bool asBool() const; - - bool isNull() const; - bool isBool() const; - bool isInt() const; - bool isUInt() const; - bool isIntegral() const; - bool isDouble() const; - bool isNumeric() const; - bool isString() const; - bool isArray() const; - bool isObject() const; - - bool isConvertibleTo( ValueType other ) const; - - /// Number of values in array or object - ArrayIndex size() const; - - /// \brief Return true if empty array, empty object, or null; - /// otherwise, false. - bool empty() const; - - /// Return isNull() - bool operator!() const; - - /// Remove all object members and array elements. - /// \pre type() is arrayValue, objectValue, or nullValue - /// \post type() is unchanged - void clear(); - - /// Resize the array to size elements. - /// New elements are initialized to null. - /// May only be called on nullValue or arrayValue. - /// \pre type() is arrayValue or nullValue - /// \post type() is arrayValue - void resize( ArrayIndex size ); - - /// Access an array element (zero based index ). - /// If the array contains less than index element, then null value are inserted - /// in the array so that its size is index+1. 
- /// (You may need to say 'value[0u]' to get your compiler to distinguish - /// this from the operator[] which takes a string.) - Value &operator[]( ArrayIndex index ); - - /// Access an array element (zero based index ). - /// If the array contains less than index element, then null value are inserted - /// in the array so that its size is index+1. - /// (You may need to say 'value[0u]' to get your compiler to distinguish - /// this from the operator[] which takes a string.) - Value &operator[]( int index ); - - /// Access an array element (zero based index ) - /// (You may need to say 'value[0u]' to get your compiler to distinguish - /// this from the operator[] which takes a string.) - const Value &operator[]( ArrayIndex index ) const; - - /// Access an array element (zero based index ) - /// (You may need to say 'value[0u]' to get your compiler to distinguish - /// this from the operator[] which takes a string.) - const Value &operator[]( int index ) const; - - /// If the array contains at least index+1 elements, returns the element value, - /// otherwise returns defaultValue. - Value get( ArrayIndex index, - const Value &defaultValue ) const; - /// Return true if index < size(). - bool isValidIndex( ArrayIndex index ) const; - /// \brief Append value to array at the end. - /// - /// Equivalent to jsonvalue[jsonvalue.size()] = value; - Value &append( const Value &value ); - - /// Access an object value by name, create a null member if it does not exist. - Value &operator[]( const char *key ); - /// Access an object value by name, returns null if there is no member with that name. - const Value &operator[]( const char *key ) const; - /// Access an object value by name, create a null member if it does not exist. - Value &operator[]( const std::string &key ); - /// Access an object value by name, returns null if there is no member with that name. 
- const Value &operator[]( const std::string &key ) const; - /** \brief Access an object value by name, create a null member if it does not exist. - - * If the object as no entry for that name, then the member name used to store - * the new entry is not duplicated. - * Example of use: - * \code - * Json::Value object; - * static const StaticString code("code"); - * object[code] = 1234; - * \endcode - */ - Value &operator[]( const StaticString &key ); -# ifdef JSON_USE_CPPTL - /// Access an object value by name, create a null member if it does not exist. - Value &operator[]( const CppTL::ConstString &key ); - /// Access an object value by name, returns null if there is no member with that name. - const Value &operator[]( const CppTL::ConstString &key ) const; -# endif - /// Return the member named key if it exist, defaultValue otherwise. - Value get( const char *key, - const Value &defaultValue ) const; - /// Return the member named key if it exist, defaultValue otherwise. - Value get( const std::string &key, - const Value &defaultValue ) const; -# ifdef JSON_USE_CPPTL - /// Return the member named key if it exist, defaultValue otherwise. - Value get( const CppTL::ConstString &key, - const Value &defaultValue ) const; -# endif - /// \brief Remove and return the named member. - /// - /// Do nothing if it did not exist. - /// \return the removed Value, or null. - /// \pre type() is objectValue or nullValue - /// \post type() is unchanged - Value removeMember( const char* key ); - /// Same as removeMember(const char*) - Value removeMember( const std::string &key ); - - /// Return true if the object has a member named key. - bool isMember( const char *key ) const; - /// Return true if the object has a member named key. - bool isMember( const std::string &key ) const; -# ifdef JSON_USE_CPPTL - /// Return true if the object has a member named key. - bool isMember( const CppTL::ConstString &key ) const; -# endif - - /// \brief Return a list of the member names. 
- /// - /// If null, return an empty list. - /// \pre type() is objectValue or nullValue - /// \post if type() was nullValue, it remains nullValue - Members getMemberNames() const; - -//# ifdef JSON_USE_CPPTL -// EnumMemberNames enumMemberNames() const; -// EnumValues enumValues() const; -//# endif - - /// Comments must be //... or /* ... */ - void setComment( const char *comment, - CommentPlacement placement ); - /// Comments must be //... or /* ... */ - void setComment( const std::string &comment, - CommentPlacement placement ); - bool hasComment( CommentPlacement placement ) const; - /// Include delimiters and embedded newlines. - std::string getComment( CommentPlacement placement ) const; - - std::string toStyledString() const; - - const_iterator begin() const; - const_iterator end() const; - - iterator begin(); - iterator end(); - - private: - Value &resolveReference( const char *key, - bool isStatic ); - -# ifdef JSON_VALUE_USE_INTERNAL_MAP - inline bool isItemAvailable() const - { - return itemIsUsed_ == 0; - } - - inline void setItemUsed( bool isUsed = true ) - { - itemIsUsed_ = isUsed ? 1 : 0; - } - - inline bool isMemberNameStatic() const - { - return memberNameIsStatic_ == 0; - } - - inline void setMemberNameIsStatic( bool isStatic ) - { - memberNameIsStatic_ = isStatic ? 
1 : 0; - } -# endif // # ifdef JSON_VALUE_USE_INTERNAL_MAP - - private: - struct CommentInfo - { - CommentInfo(); - ~CommentInfo(); - - void setComment( const char *text ); - - char *comment_; - }; - - //struct MemberNamesTransform - //{ - // typedef const char *result_type; - // const char *operator()( const CZString &name ) const - // { - // return name.c_str(); - // } - //}; - - union ValueHolder - { - LargestInt int_; - LargestUInt uint_; - double real_; - bool bool_; - char *string_; -# ifdef JSON_VALUE_USE_INTERNAL_MAP - ValueInternalArray *array_; - ValueInternalMap *map_; -#else - ObjectValues *map_; -# endif - } value_; - ValueType type_ : 8; - int allocated_ : 1; // Notes: if declared as bool, bitfield is useless. -# ifdef JSON_VALUE_USE_INTERNAL_MAP - unsigned int itemIsUsed_ : 1; // used by the ValueInternalMap container. - int memberNameIsStatic_ : 1; // used by the ValueInternalMap container. -# endif - CommentInfo *comments_; - }; - - - /** \brief Experimental and untested: represents an element of the "path" to access a node. - */ - class PathArgument - { - public: - friend class Path; - - PathArgument(); - PathArgument( ArrayIndex index ); - PathArgument( const char *key ); - PathArgument( const std::string &key ); - - private: - enum Kind - { - kindNone = 0, - kindIndex, - kindKey - }; - std::string key_; - ArrayIndex index_; - Kind kind_; - }; - - /** \brief Experimental and untested: represents a "path" to access a node. - * - * Syntax: - * - "." 
=> root node - * - ".[n]" => elements at index 'n' of root node (an array value) - * - ".name" => member named 'name' of root node (an object value) - * - ".name1.name2.name3" - * - ".[0][1][2].name1[3]" - * - ".%" => member name is provided as parameter - * - ".[%]" => index is provied as parameter - */ - class Path - { - public: - Path( const std::string &path, - const PathArgument &a1 = PathArgument(), - const PathArgument &a2 = PathArgument(), - const PathArgument &a3 = PathArgument(), - const PathArgument &a4 = PathArgument(), - const PathArgument &a5 = PathArgument() ); - - const Value &resolve( const Value &root ) const; - Value resolve( const Value &root, - const Value &defaultValue ) const; - /// Creates the "path" to access the specified node and returns a reference on the node. - Value &make( Value &root ) const; - - private: - typedef std::vector InArgs; - typedef std::vector Args; - - void makePath( const std::string &path, - const InArgs &in ); - void addPathInArg( const std::string &path, - const InArgs &in, - InArgs::const_iterator &itInArg, - PathArgument::Kind kind ); - void invalidPath( const std::string &path, - int location ); - - Args args_; - }; - - - -#ifdef JSON_VALUE_USE_INTERNAL_MAP - /** \brief Allocator to customize Value internal map. - * Below is an example of a simple implementation (default implementation actually - * use memory pool for speed). 
- * \code - class DefaultValueMapAllocator : public ValueMapAllocator - { - public: // overridden from ValueMapAllocator - virtual ValueInternalMap *newMap() - { - return new ValueInternalMap(); - } - - virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other ) - { - return new ValueInternalMap( other ); - } - - virtual void destructMap( ValueInternalMap *map ) - { - delete map; - } - - virtual ValueInternalLink *allocateMapBuckets( unsigned int size ) - { - return new ValueInternalLink[size]; - } - - virtual void releaseMapBuckets( ValueInternalLink *links ) - { - delete [] links; - } - - virtual ValueInternalLink *allocateMapLink() - { - return new ValueInternalLink(); - } - - virtual void releaseMapLink( ValueInternalLink *link ) - { - delete link; - } - }; - * \endcode - */ - class JSON_API ValueMapAllocator - { - public: - virtual ~ValueMapAllocator(); - virtual ValueInternalMap *newMap() = 0; - virtual ValueInternalMap *newMapCopy( const ValueInternalMap &other ) = 0; - virtual void destructMap( ValueInternalMap *map ) = 0; - virtual ValueInternalLink *allocateMapBuckets( unsigned int size ) = 0; - virtual void releaseMapBuckets( ValueInternalLink *links ) = 0; - virtual ValueInternalLink *allocateMapLink() = 0; - virtual void releaseMapLink( ValueInternalLink *link ) = 0; - }; - - /** \brief ValueInternalMap hash-map bucket chain link (for internal use only). - * \internal previous_ & next_ allows for bidirectional traversal. - */ - class JSON_API ValueInternalLink - { - public: - enum { itemPerLink = 6 }; // sizeof(ValueInternalLink) = 128 on 32 bits architecture. - enum InternalFlags { - flagAvailable = 0, - flagUsed = 1 - }; - - ValueInternalLink(); - - ~ValueInternalLink(); - - Value items_[itemPerLink]; - char *keys_[itemPerLink]; - ValueInternalLink *previous_; - ValueInternalLink *next_; - }; - - - /** \brief A linked page based hash-table implementation used internally by Value. 
- * \internal ValueInternalMap is a tradional bucket based hash-table, with a linked - * list in each bucket to handle collision. There is an addional twist in that - * each node of the collision linked list is a page containing a fixed amount of - * value. This provides a better compromise between memory usage and speed. - * - * Each bucket is made up of a chained list of ValueInternalLink. The last - * link of a given bucket can be found in the 'previous_' field of the following bucket. - * The last link of the last bucket is stored in tailLink_ as it has no following bucket. - * Only the last link of a bucket may contains 'available' item. The last link always - * contains at least one element unless is it the bucket one very first link. - */ - class JSON_API ValueInternalMap - { - friend class ValueIteratorBase; - friend class Value; - public: - typedef unsigned int HashKey; - typedef unsigned int BucketIndex; - -# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - struct IteratorState - { - IteratorState() - : map_(0) - , link_(0) - , itemIndex_(0) - , bucketIndex_(0) - { - } - ValueInternalMap *map_; - ValueInternalLink *link_; - BucketIndex itemIndex_; - BucketIndex bucketIndex_; - }; -# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - - ValueInternalMap(); - ValueInternalMap( const ValueInternalMap &other ); - ValueInternalMap &operator =( const ValueInternalMap &other ); - ~ValueInternalMap(); - - void swap( ValueInternalMap &other ); - - BucketIndex size() const; - - void clear(); - - bool reserveDelta( BucketIndex growth ); - - bool reserve( BucketIndex newItemCount ); - - const Value *find( const char *key ) const; - - Value *find( const char *key ); - - Value &resolveReference( const char *key, - bool isStatic ); - - void remove( const char *key ); - - void doActualRemove( ValueInternalLink *link, - BucketIndex index, - BucketIndex bucketIndex ); - - ValueInternalLink *&getLastLinkInBucket( BucketIndex bucketIndex ); - - Value &setNewItem( const char 
*key, - bool isStatic, - ValueInternalLink *link, - BucketIndex index ); - - Value &unsafeAdd( const char *key, - bool isStatic, - HashKey hashedKey ); - - HashKey hash( const char *key ) const; - - int compare( const ValueInternalMap &other ) const; - - private: - void makeBeginIterator( IteratorState &it ) const; - void makeEndIterator( IteratorState &it ) const; - static bool equals( const IteratorState &x, const IteratorState &other ); - static void increment( IteratorState &iterator ); - static void incrementBucket( IteratorState &iterator ); - static void decrement( IteratorState &iterator ); - static const char *key( const IteratorState &iterator ); - static const char *key( const IteratorState &iterator, bool &isStatic ); - static Value &value( const IteratorState &iterator ); - static int distance( const IteratorState &x, const IteratorState &y ); - - private: - ValueInternalLink *buckets_; - ValueInternalLink *tailLink_; - BucketIndex bucketsSize_; - BucketIndex itemCount_; - }; - - /** \brief A simplified deque implementation used internally by Value. - * \internal - * It is based on a list of fixed "page", each page contains a fixed number of items. - * Instead of using a linked-list, a array of pointer is used for fast item look-up. - * Look-up for an element is as follow: - * - compute page index: pageIndex = itemIndex / itemsPerPage - * - look-up item in page: pages_[pageIndex][itemIndex % itemsPerPage] - * - * Insertion is amortized constant time (only the array containing the index of pointers - * need to be reallocated when items are appended). - */ - class JSON_API ValueInternalArray - { - friend class Value; - friend class ValueIteratorBase; - public: - enum { itemsPerPage = 8 }; // should be a power of 2 for fast divide and modulo. 
- typedef Value::ArrayIndex ArrayIndex; - typedef unsigned int PageIndex; - -# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - struct IteratorState // Must be a POD - { - IteratorState() - : array_(0) - , currentPageIndex_(0) - , currentItemIndex_(0) - { - } - ValueInternalArray *array_; - Value **currentPageIndex_; - unsigned int currentItemIndex_; - }; -# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - - ValueInternalArray(); - ValueInternalArray( const ValueInternalArray &other ); - ValueInternalArray &operator =( const ValueInternalArray &other ); - ~ValueInternalArray(); - void swap( ValueInternalArray &other ); - - void clear(); - void resize( ArrayIndex newSize ); - - Value &resolveReference( ArrayIndex index ); - - Value *find( ArrayIndex index ) const; - - ArrayIndex size() const; - - int compare( const ValueInternalArray &other ) const; - - private: - static bool equals( const IteratorState &x, const IteratorState &other ); - static void increment( IteratorState &iterator ); - static void decrement( IteratorState &iterator ); - static Value &dereference( const IteratorState &iterator ); - static Value &unsafeDereference( const IteratorState &iterator ); - static int distance( const IteratorState &x, const IteratorState &y ); - static ArrayIndex indexOf( const IteratorState &iterator ); - void makeBeginIterator( IteratorState &it ) const; - void makeEndIterator( IteratorState &it ) const; - void makeIterator( IteratorState &it, ArrayIndex index ) const; - - void makeIndexValid( ArrayIndex index ); - - Value **pages_; - ArrayIndex size_; - PageIndex pageCount_; - }; - - /** \brief Experimental: do not use. Allocator to customize Value internal array. - * Below is an example of a simple implementation (actual implementation use - * memory pool). 
- \code -class DefaultValueArrayAllocator : public ValueArrayAllocator -{ -public: // overridden from ValueArrayAllocator - virtual ~DefaultValueArrayAllocator() - { - } - - virtual ValueInternalArray *newArray() - { - return new ValueInternalArray(); - } - - virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other ) - { - return new ValueInternalArray( other ); - } - - virtual void destruct( ValueInternalArray *array ) - { - delete array; - } - - virtual void reallocateArrayPageIndex( Value **&indexes, - ValueInternalArray::PageIndex &indexCount, - ValueInternalArray::PageIndex minNewIndexCount ) - { - ValueInternalArray::PageIndex newIndexCount = (indexCount*3)/2 + 1; - if ( minNewIndexCount > newIndexCount ) - newIndexCount = minNewIndexCount; - void *newIndexes = realloc( indexes, sizeof(Value*) * newIndexCount ); - if ( !newIndexes ) - throw std::bad_alloc(); - indexCount = newIndexCount; - indexes = static_cast( newIndexes ); - } - virtual void releaseArrayPageIndex( Value **indexes, - ValueInternalArray::PageIndex indexCount ) - { - if ( indexes ) - free( indexes ); - } - - virtual Value *allocateArrayPage() - { - return static_cast( malloc( sizeof(Value) * ValueInternalArray::itemsPerPage ) ); - } - - virtual void releaseArrayPage( Value *value ) - { - if ( value ) - free( value ); - } -}; - \endcode - */ - class JSON_API ValueArrayAllocator - { - public: - virtual ~ValueArrayAllocator(); - virtual ValueInternalArray *newArray() = 0; - virtual ValueInternalArray *newArrayCopy( const ValueInternalArray &other ) = 0; - virtual void destructArray( ValueInternalArray *array ) = 0; - /** \brief Reallocate array page index. - * Reallocates an array of pointer on each page. - * \param indexes [input] pointer on the current index. May be \c NULL. - * [output] pointer on the new index of at least - * \a minNewIndexCount pages. - * \param indexCount [input] current number of pages in the index. 
- * [output] number of page the reallocated index can handle. - * \b MUST be >= \a minNewIndexCount. - * \param minNewIndexCount Minimum number of page the new index must be able to - * handle. - */ - virtual void reallocateArrayPageIndex( Value **&indexes, - ValueInternalArray::PageIndex &indexCount, - ValueInternalArray::PageIndex minNewIndexCount ) = 0; - virtual void releaseArrayPageIndex( Value **indexes, - ValueInternalArray::PageIndex indexCount ) = 0; - virtual Value *allocateArrayPage() = 0; - virtual void releaseArrayPage( Value *value ) = 0; - }; -#endif // #ifdef JSON_VALUE_USE_INTERNAL_MAP - - - /** \brief base class for Value iterators. - * - */ - class ValueIteratorBase - { - public: - typedef unsigned int size_t; - typedef int difference_type; - typedef ValueIteratorBase SelfType; - - ValueIteratorBase(); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - explicit ValueIteratorBase( const Value::ObjectValues::iterator ¤t ); -#else - ValueIteratorBase( const ValueInternalArray::IteratorState &state ); - ValueIteratorBase( const ValueInternalMap::IteratorState &state ); -#endif - - bool operator ==( const SelfType &other ) const - { - return isEqual( other ); - } - - bool operator !=( const SelfType &other ) const - { - return !isEqual( other ); - } - - difference_type operator -( const SelfType &other ) const - { - return computeDistance( other ); - } - - /// Return either the index or the member name of the referenced value as a Value. - Value key() const; - - /// Return the index of the referenced Value. -1 if it is not an arrayValue. - UInt index() const; - - /// Return the member name of the referenced Value. "" if it is not an objectValue. 
- const char *memberName() const; - - protected: - Value &deref() const; - - void increment(); - - void decrement(); - - difference_type computeDistance( const SelfType &other ) const; - - bool isEqual( const SelfType &other ) const; - - void copy( const SelfType &other ); - - private: -#ifndef JSON_VALUE_USE_INTERNAL_MAP - Value::ObjectValues::iterator current_; - // Indicates that iterator is for a null value. - bool isNull_; -#else - union - { - ValueInternalArray::IteratorState array_; - ValueInternalMap::IteratorState map_; - } iterator_; - bool isArray_; -#endif - }; - - /** \brief const iterator for object and array value. - * - */ - class ValueConstIterator : public ValueIteratorBase - { - friend class Value; - public: - typedef unsigned int size_t; - typedef int difference_type; - typedef const Value &reference; - typedef const Value *pointer; - typedef ValueConstIterator SelfType; - - ValueConstIterator(); - private: - /*! \internal Use by Value to create an iterator. - */ -#ifndef JSON_VALUE_USE_INTERNAL_MAP - explicit ValueConstIterator( const Value::ObjectValues::iterator ¤t ); -#else - ValueConstIterator( const ValueInternalArray::IteratorState &state ); - ValueConstIterator( const ValueInternalMap::IteratorState &state ); -#endif - public: - SelfType &operator =( const ValueIteratorBase &other ); - - SelfType operator++( int ) - { - SelfType temp( *this ); - ++*this; - return temp; - } - - SelfType operator--( int ) - { - SelfType temp( *this ); - --*this; - return temp; - } - - SelfType &operator--() - { - decrement(); - return *this; - } - - SelfType &operator++() - { - increment(); - return *this; - } - - reference operator *() const - { - return deref(); - } - }; - - - /** \brief Iterator for object and array value. 
- */ - class ValueIterator : public ValueIteratorBase - { - friend class Value; - public: - typedef unsigned int size_t; - typedef int difference_type; - typedef Value &reference; - typedef Value *pointer; - typedef ValueIterator SelfType; - - ValueIterator(); - ValueIterator( const ValueConstIterator &other ); - ValueIterator( const ValueIterator &other ); - private: - /*! \internal Use by Value to create an iterator. - */ -#ifndef JSON_VALUE_USE_INTERNAL_MAP - explicit ValueIterator( const Value::ObjectValues::iterator ¤t ); -#else - ValueIterator( const ValueInternalArray::IteratorState &state ); - ValueIterator( const ValueInternalMap::IteratorState &state ); -#endif - public: - - SelfType &operator =( const SelfType &other ); - - SelfType operator++( int ) - { - SelfType temp( *this ); - ++*this; - return temp; - } - - SelfType operator--( int ) - { - SelfType temp( *this ); - --*this; - return temp; - } - - SelfType &operator--() - { - decrement(); - return *this; - } - - SelfType &operator++() - { - increment(); - return *this; - } - - reference operator *() const - { - return deref(); - } - }; - - -} // namespace Json - - -#endif // CPPTL_JSON_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/value.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/reader.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef CPPTL_JSON_READER_H_INCLUDED -# define CPPTL_JSON_READER_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "features.h" -# include "value.h" -#endif // if !defined(JSON_IS_AMALGAMATION) -# include -# include -# include -# include - -namespace Json { - - /** \brief Unserialize a JSON document into a Value. - * - */ - class JSON_API Reader - { - public: - typedef char Char; - typedef const Char *Location; - - /** \brief Constructs a Reader allowing all features - * for parsing. - */ - Reader(); - - /** \brief Constructs a Reader allowing the specified feature set - * for parsing. - */ - Reader( const Features &features ); - - /** \brief Read a Value from a JSON document. - * \param document UTF-8 encoded string containing the document to read. - * \param root [out] Contains the root value of the document if it was - * successfully parsed. - * \param collectComments \c true to collect comment and allow writing them back during - * serialization, \c false to discard comments. - * This parameter is ignored if Features::allowComments_ - * is \c false. - * \return \c true if the document was successfully parsed, \c false if an error occurred. - */ - bool parse( const std::string &document, - Value &root, - bool collectComments = true ); - - /** \brief Read a Value from a JSON document. - * \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the document to read. - * \param endDoc Pointer on the end of the UTF-8 encoded string of the document to read. - \ Must be >= beginDoc. - * \param root [out] Contains the root value of the document if it was - * successfully parsed. - * \param collectComments \c true to collect comment and allow writing them back during - * serialization, \c false to discard comments. - * This parameter is ignored if Features::allowComments_ - * is \c false. 
- * \return \c true if the document was successfully parsed, \c false if an error occurred. - */ - bool parse( const char *beginDoc, const char *endDoc, - Value &root, - bool collectComments = true ); - - /// \brief Parse from input stream. - /// \see Json::operator>>(std::istream&, Json::Value&). - bool parse( std::istream &is, - Value &root, - bool collectComments = true ); - - /** \brief Returns a user friendly string that list errors in the parsed document. - * \return Formatted error message with the list of errors with their location in - * the parsed document. An empty string is returned if no error occurred - * during parsing. - * \deprecated Use getFormattedErrorMessages() instead (typo fix). - */ - JSONCPP_DEPRECATED("Use getFormattedErrorMessages instead") - std::string getFormatedErrorMessages() const; - - /** \brief Returns a user friendly string that list errors in the parsed document. - * \return Formatted error message with the list of errors with their location in - * the parsed document. An empty string is returned if no error occurred - * during parsing. 
- */ - std::string getFormattedErrorMessages() const; - - private: - enum TokenType - { - tokenEndOfStream = 0, - tokenObjectBegin, - tokenObjectEnd, - tokenArrayBegin, - tokenArrayEnd, - tokenString, - tokenNumber, - tokenTrue, - tokenFalse, - tokenNull, - tokenArraySeparator, - tokenMemberSeparator, - tokenComment, - tokenError - }; - - class Token - { - public: - TokenType type_; - Location start_; - Location end_; - }; - - class ErrorInfo - { - public: - Token token_; - std::string message_; - Location extra_; - }; - - typedef std::deque Errors; - - bool expectToken( TokenType type, Token &token, const char *message ); - bool readToken( Token &token ); - void skipSpaces(); - bool match( Location pattern, - int patternLength ); - bool readComment(); - bool readCStyleComment(); - bool readCppStyleComment(); - bool readString(); - void readNumber(); - bool readValue(); - bool readObject( Token &token ); - bool readArray( Token &token ); - bool decodeNumber( Token &token ); - bool decodeString( Token &token ); - bool decodeString( Token &token, std::string &decoded ); - bool decodeDouble( Token &token ); - bool decodeUnicodeCodePoint( Token &token, - Location ¤t, - Location end, - unsigned int &unicode ); - bool decodeUnicodeEscapeSequence( Token &token, - Location ¤t, - Location end, - unsigned int &unicode ); - bool addError( const std::string &message, - Token &token, - Location extra = 0 ); - bool recoverFromError( TokenType skipUntilToken ); - bool addErrorAndRecover( const std::string &message, - Token &token, - TokenType skipUntilToken ); - void skipUntilSpace(); - Value ¤tValue(); - Char getNextChar(); - void getLocationLineAndColumn( Location location, - int &line, - int &column ) const; - std::string getLocationLineAndColumn( Location location ) const; - void addComment( Location begin, - Location end, - CommentPlacement placement ); - void skipCommentTokens( Token &token ); - - typedef std::stack Nodes; - Nodes nodes_; - Errors errors_; - std::string 
document_; - Location begin_; - Location end_; - Location current_; - Location lastValueEnd_; - Value *lastValue_; - std::string commentsBefore_; - Features features_; - bool collectComments_; - }; - - /** \brief Read from 'sin' into 'root'. - - Always keep comments from the input JSON. - - This can be used to read a file into a particular sub-object. - For example: - \code - Json::Value root; - cin >> root["dir"]["file"]; - cout << root; - \endcode - Result: - \verbatim - { - "dir": { - "file": { - // The input stream JSON would be nested here. - } - } - } - \endverbatim - \throw std::exception on parse error. - \see Json::operator<<() - */ - std::istream& operator>>( std::istream&, Value& ); - -} // namespace Json - -#endif // CPPTL_JSON_READER_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/reader.h -// ////////////////////////////////////////////////////////////////////// - - - - - - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: include/json/writer.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSON_WRITER_H_INCLUDED -# define JSON_WRITER_H_INCLUDED - -#if !defined(JSON_IS_AMALGAMATION) -# include "value.h" -#endif // if !defined(JSON_IS_AMALGAMATION) -# include -# include -# include - -namespace Json { - - class Value; - - /** \brief Abstract class for writers. - */ - class JSON_API Writer - { - public: - virtual ~Writer(); - - virtual std::string write( const Value &root ) = 0; - }; - - /** \brief Outputs a Value in JSON format without formatting (not human friendly). - * - * The JSON document is written in a single line. 
It is not intended for 'human' consumption, - * but may be usefull to support feature such as RPC where bandwith is limited. - * \sa Reader, Value - */ - class JSON_API FastWriter : public Writer - { - public: - FastWriter(); - virtual ~FastWriter(){} - - void enableYAMLCompatibility(); - - public: // overridden from Writer - virtual std::string write( const Value &root ); - - private: - void writeValue( const Value &value ); - - std::string document_; - bool yamlCompatiblityEnabled_; - }; - - /** \brief Writes a Value in JSON format in a human friendly way. - * - * The rules for line break and indent are as follow: - * - Object value: - * - if empty then print {} without indent and line break - * - if not empty the print '{', line break & indent, print one value per line - * and then unindent and line break and print '}'. - * - Array value: - * - if empty then print [] without indent and line break - * - if the array contains no object value, empty array or some other value types, - * and all the values fit on one lines, then print the array on a single line. - * - otherwise, it the values do not fit on one line, or the array contains - * object or non empty array, then print one value per line. - * - * If the Value have comments then they are outputed according to their #CommentPlacement. - * - * \sa Reader, Value, Value::setComment() - */ - class JSON_API StyledWriter: public Writer - { - public: - StyledWriter(); - virtual ~StyledWriter(){} - - public: // overridden from Writer - /** \brief Serialize a Value in JSON format. - * \param root Value to serialize. - * \return String containing the JSON document that represents the root value. 
- */ - virtual std::string write( const Value &root ); - - private: - void writeValue( const Value &value ); - void writeArrayValue( const Value &value ); - bool isMultineArray( const Value &value ); - void pushValue( const std::string &value ); - void writeIndent(); - void writeWithIndent( const std::string &value ); - void indent(); - void unindent(); - void writeCommentBeforeValue( const Value &root ); - void writeCommentAfterValueOnSameLine( const Value &root ); - bool hasCommentForValue( const Value &value ); - static std::string normalizeEOL( const std::string &text ); - - typedef std::vector ChildValues; - - ChildValues childValues_; - std::string document_; - std::string indentString_; - int rightMargin_; - int indentSize_; - bool addChildValues_; - }; - - /** \brief Writes a Value in JSON format in a human friendly way, - to a stream rather than to a string. - * - * The rules for line break and indent are as follow: - * - Object value: - * - if empty then print {} without indent and line break - * - if not empty the print '{', line break & indent, print one value per line - * and then unindent and line break and print '}'. - * - Array value: - * - if empty then print [] without indent and line break - * - if the array contains no object value, empty array or some other value types, - * and all the values fit on one lines, then print the array on a single line. - * - otherwise, it the values do not fit on one line, or the array contains - * object or non empty array, then print one value per line. - * - * If the Value have comments then they are outputed according to their #CommentPlacement. - * - * \param indentation Each level will be indented by this amount extra. - * \sa Reader, Value, Value::setComment() - */ - class JSON_API StyledStreamWriter - { - public: - StyledStreamWriter( std::string indentation="\t" ); - ~StyledStreamWriter(){} - - public: - /** \brief Serialize a Value in JSON format. - * \param out Stream to write to. 
(Can be ostringstream, e.g.) - * \param root Value to serialize. - * \note There is no point in deriving from Writer, since write() should not return a value. - */ - void write( std::ostream &out, const Value &root ); - - private: - void writeValue( const Value &value ); - void writeArrayValue( const Value &value ); - bool isMultineArray( const Value &value ); - void pushValue( const std::string &value ); - void writeIndent(); - void writeWithIndent( const std::string &value ); - void indent(); - void unindent(); - void writeCommentBeforeValue( const Value &root ); - void writeCommentAfterValueOnSameLine( const Value &root ); - bool hasCommentForValue( const Value &value ); - static std::string normalizeEOL( const std::string &text ); - - typedef std::vector ChildValues; - - ChildValues childValues_; - std::ostream* document_; - std::string indentString_; - int rightMargin_; - std::string indentation_; - bool addChildValues_; - }; - -# if defined(JSON_HAS_INT64) - std::string JSON_API valueToString( Int value ); - std::string JSON_API valueToString( UInt value ); -# endif // if defined(JSON_HAS_INT64) - std::string JSON_API valueToString( LargestInt value ); - std::string JSON_API valueToString( LargestUInt value ); - std::string JSON_API valueToString( double value ); - std::string JSON_API valueToString( bool value ); - std::string JSON_API valueToQuotedString( const char *value ); - - /// \brief Output using the StyledStreamWriter. 
- /// \see Json::operator>>() - std::ostream& operator<<( std::ostream&, const Value &root ); - -} // namespace Json - - - -#endif // JSON_WRITER_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: include/json/writer.h -// ////////////////////////////////////////////////////////////////////// - - - - - -#endif //ifndef JSON_AMALGATED_H_INCLUDED diff --git a/third_party/jsoncpp/jsoncpp.cpp b/third_party/jsoncpp/jsoncpp.cpp deleted file mode 100644 index 8fb8c25d2c..0000000000 --- a/third_party/jsoncpp/jsoncpp.cpp +++ /dev/null @@ -1,3152 +0,0 @@ -/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/). -/// It is intented to be used with #include - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - -/* -The JsonCpp library's source code, including accompanying documentation, -tests and demonstration applications, are licensed under the following -conditions... - -The author (Baptiste Lepilleur) explicitly disclaims copyright in all -jurisdictions which recognize such a disclaimer. In such jurisdictions, -this software is released into the Public Domain. - -In jurisdictions which do not recognize Public Domain property (e.g. Germany as -of -2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur, and is -released under the terms of the MIT License (see below). - -In jurisdictions which recognize Public Domain property, the user of this -software may choose to accept it either as 1) Public Domain, 2) under the -conditions of the MIT License (see below), or 3) under the terms of dual -Public Domain/MIT License conditions described here, as they choose. 
- -The MIT License is about as close to Public Domain as a license can get, and is -described in clear, concise terms at: - - http://en.wikipedia.org/wiki/MIT_License - -The full text of the MIT License follows: - -======================================================================== -Copyright (c) 2007-2010 Baptiste Lepilleur - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, copy, -modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -======================================================================== -(END LICENSE TEXT) - -The MIT license is compatible with both the GPL and commercial -software, affording one all of the rights of Public Domain with the -minor nuisance of being required to keep the above copyright notice -and license text in the source code. Note also that by accepting the -Public Domain "license" you can re-license your copy using whatever -license you like. 
- -*/ - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: LICENSE -// ////////////////////////////////////////////////////////////////////// - -#include - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_tool.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef LIB_JSONCPP_JSON_TOOL_H_INCLUDED -#define LIB_JSONCPP_JSON_TOOL_H_INCLUDED - -/* This header provides common string manipulation support, such as UTF-8, - * portable conversion from/to string... - * - * It is an internal header that must not be exposed. - */ - -namespace Json { - -/// Converts a unicode code-point to UTF-8. -static inline std::string codePointToUTF8(unsigned int cp) { - std::string result; - - // based on description from http://en.wikipedia.org/wiki/UTF-8 - - if (cp <= 0x7f) { - result.resize(1); - result[0] = static_cast(cp); - } else if (cp <= 0x7FF) { - result.resize(2); - result[1] = static_cast(0x80 | (0x3f & cp)); - result[0] = static_cast(0xC0 | (0x1f & (cp >> 6))); - } else if (cp <= 0xFFFF) { - result.resize(3); - result[2] = static_cast(0x80 | (0x3f & cp)); - result[1] = 0x80 | static_cast((0x3f & (cp >> 6))); - result[0] = 0xE0 | static_cast((0xf & (cp >> 12))); - } else if (cp <= 0x10FFFF) { - result.resize(4); - result[3] = static_cast(0x80 | (0x3f & cp)); - result[2] = static_cast(0x80 | (0x3f & (cp >> 6))); - result[1] = static_cast(0x80 | (0x3f & (cp >> 12))); - result[0] = static_cast(0xF0 | (0x7 & (cp >> 18))); - } - - return result; -} - -/// Returns true if ch is a control character (in range [0,32[). 
-static inline bool isControlCharacter(char ch) { return ch > 0 && ch <= 0x1F; } - -enum { - /// Constant that specify the size of the buffer that must be passed to - /// uintToString. - uintToStringBufferSize = 3 * sizeof(LargestUInt) + 1 -}; - -// Defines a char buffer for use with uintToString(). -typedef char UIntToStringBuffer[uintToStringBufferSize]; - -/** Converts an unsigned integer to string. - * @param value Unsigned interger to convert to string - * @param current Input/Output string buffer. - * Must have at least uintToStringBufferSize chars free. - */ -static inline void uintToString(LargestUInt value, char *¤t) { - *--current = 0; - do { - *--current = char(value % 10) + '0'; - value /= 10; - } while (value != 0); -} - -} // namespace Json { - -#endif // LIB_JSONCPP_JSON_TOOL_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_tool.h -// ////////////////////////////////////////////////////////////////////// - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_reader.cpp -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#if !defined(JSON_IS_AMALGAMATION) -#include -#include -#include "json_tool.h" -#endif // if !defined(JSON_IS_AMALGAMATION) -#include -#include -#include -#include -#include -#include - -#if _MSC_VER >= 1400 // VC++ 8.0 -#pragma warning(disable : 4996) // disable warning about strdup being deprecated. 
-#endif - -namespace Json { - -// Implementation of class Features -// //////////////////////////////// - -Features::Features() : allowComments_(true), strictRoot_(false) {} - -Features Features::all() { return Features(); } - -Features Features::strictMode() { - Features features; - features.allowComments_ = false; - features.strictRoot_ = true; - return features; -} - -// Implementation of class Reader -// //////////////////////////////// - -static inline bool in(Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4) { - return c == c1 || c == c2 || c == c3 || c == c4; -} - -static inline bool in(Reader::Char c, Reader::Char c1, Reader::Char c2, Reader::Char c3, Reader::Char c4, - Reader::Char c5) { - return c == c1 || c == c2 || c == c3 || c == c4 || c == c5; -} - -static bool containsNewLine(Reader::Location begin, Reader::Location end) { - for (; begin < end; ++begin) - if (*begin == '\n' || *begin == '\r') return true; - return false; -} - -// Class Reader -// ////////////////////////////////////////////////////////////////// - -Reader::Reader() : features_(Features::all()) {} - -Reader::Reader(const Features &features) : features_(features) {} - -bool Reader::parse(const std::string &document, Value &root, bool collectComments) { - document_ = document; - const char *begin = document_.c_str(); - const char *end = begin + document_.length(); - return parse(begin, end, root, collectComments); -} - -bool Reader::parse(std::istream &sin, Value &root, bool collectComments) { - // std::istream_iterator begin(sin); - // std::istream_iterator end; - // Those would allow streamed input from a file, if parse() were a - // template function. - - // Since std::string is reference-counted, this at least does not - // create an extra copy. 
- std::string doc; - std::getline(sin, doc, (char)EOF); - return parse(doc, root, collectComments); -} - -bool Reader::parse(const char *beginDoc, const char *endDoc, Value &root, bool collectComments) { - if (!features_.allowComments_) { - collectComments = false; - } - - begin_ = beginDoc; - end_ = endDoc; - collectComments_ = collectComments; - current_ = begin_; - lastValueEnd_ = 0; - lastValue_ = 0; - commentsBefore_ = ""; - errors_.clear(); - while (!nodes_.empty()) nodes_.pop(); - nodes_.push(&root); - - bool successful = readValue(); - Token token; - skipCommentTokens(token); - if (collectComments_ && !commentsBefore_.empty()) root.setComment(commentsBefore_, commentAfter); - if (features_.strictRoot_) { - if (!root.isArray() && !root.isObject()) { - // Set error location to start of doc, ideally should be first token found - // in doc - token.type_ = tokenError; - token.start_ = beginDoc; - token.end_ = endDoc; - addError("A valid JSON document must be either an array or an object value.", token); - return false; - } - } - return successful; -} - -bool Reader::readValue() { - Token token; - skipCommentTokens(token); - bool successful = true; - - if (collectComments_ && !commentsBefore_.empty()) { - currentValue().setComment(commentsBefore_, commentBefore); - commentsBefore_ = ""; - } - - switch (token.type_) { - case tokenObjectBegin: - successful = readObject(token); - break; - case tokenArrayBegin: - successful = readArray(token); - break; - case tokenNumber: - successful = decodeNumber(token); - break; - case tokenString: - successful = decodeString(token); - break; - case tokenTrue: - currentValue() = true; - break; - case tokenFalse: - currentValue() = false; - break; - case tokenNull: - currentValue() = Value(); - break; - default: - return addError("Syntax error: value, object or array expected.", token); - } - - if (collectComments_) { - lastValueEnd_ = current_; - lastValue_ = ¤tValue(); - } - - return successful; -} - -void 
Reader::skipCommentTokens(Token &token) { - if (features_.allowComments_) { - do { - readToken(token); - } while (token.type_ == tokenComment); - } else { - readToken(token); - } -} - -bool Reader::expectToken(TokenType type, Token &token, const char *message) { - readToken(token); - if (token.type_ != type) return addError(message, token); - return true; -} - -bool Reader::readToken(Token &token) { - skipSpaces(); - token.start_ = current_; - Char c = getNextChar(); - bool ok = true; - switch (c) { - case '{': - token.type_ = tokenObjectBegin; - break; - case '}': - token.type_ = tokenObjectEnd; - break; - case '[': - token.type_ = tokenArrayBegin; - break; - case ']': - token.type_ = tokenArrayEnd; - break; - case '"': - token.type_ = tokenString; - ok = readString(); - break; - case '/': - token.type_ = tokenComment; - ok = readComment(); - break; - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - case '-': - token.type_ = tokenNumber; - readNumber(); - break; - case 't': - token.type_ = tokenTrue; - ok = match("rue", 3); - break; - case 'f': - token.type_ = tokenFalse; - ok = match("alse", 4); - break; - case 'n': - token.type_ = tokenNull; - ok = match("ull", 3); - break; - case ',': - token.type_ = tokenArraySeparator; - break; - case ':': - token.type_ = tokenMemberSeparator; - break; - case 0: - token.type_ = tokenEndOfStream; - break; - default: - ok = false; - break; - } - if (!ok) token.type_ = tokenError; - token.end_ = current_; - return true; -} - -void Reader::skipSpaces() { - while (current_ != end_) { - Char c = *current_; - if (c == ' ' || c == '\t' || c == '\r' || c == '\n') - ++current_; - else - break; - } -} - -bool Reader::match(Location pattern, int patternLength) { - if (end_ - current_ < patternLength) return false; - int index = patternLength; - while (index--) - if (current_[index] != pattern[index]) return false; - current_ += patternLength; - return true; -} - -bool 
Reader::readComment() { - Location commentBegin = current_ - 1; - Char c = getNextChar(); - bool successful = false; - if (c == '*') - successful = readCStyleComment(); - else if (c == '/') - successful = readCppStyleComment(); - if (!successful) return false; - - if (collectComments_) { - CommentPlacement placement = commentBefore; - if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) { - if (c != '*' || !containsNewLine(commentBegin, current_)) placement = commentAfterOnSameLine; - } - - addComment(commentBegin, current_, placement); - } - return true; -} - -void Reader::addComment(Location begin, Location end, CommentPlacement placement) { - assert(collectComments_); - if (placement == commentAfterOnSameLine) { - assert(lastValue_ != 0); - lastValue_->setComment(std::string(begin, end), placement); - } else { - if (!commentsBefore_.empty()) commentsBefore_ += "\n"; - commentsBefore_ += std::string(begin, end); - } -} - -bool Reader::readCStyleComment() { - while (current_ != end_) { - Char c = getNextChar(); - if (c == '*' && *current_ == '/') break; - } - return getNextChar() == '/'; -} - -bool Reader::readCppStyleComment() { - while (current_ != end_) { - Char c = getNextChar(); - if (c == '\r' || c == '\n') break; - } - return true; -} - -void Reader::readNumber() { - while (current_ != end_) { - if (!(*current_ >= '0' && *current_ <= '9') && !in(*current_, '.', 'e', 'E', '+', '-')) break; - ++current_; - } -} - -bool Reader::readString() { - Char c = 0; - while (current_ != end_) { - c = getNextChar(); - if (c == '\\') - getNextChar(); - else if (c == '"') - break; - } - return c == '"'; -} - -bool Reader::readObject(Token & /*tokenStart*/) { - Token tokenName; - std::string name; - currentValue() = Value(objectValue); - while (readToken(tokenName)) { - bool initialTokenOk = true; - while (tokenName.type_ == tokenComment && initialTokenOk) initialTokenOk = readToken(tokenName); - if (!initialTokenOk) break; - if (tokenName.type_ == 
tokenObjectEnd && name.empty()) // empty object - return true; - if (tokenName.type_ != tokenString) break; - - name = ""; - if (!decodeString(tokenName, name)) return recoverFromError(tokenObjectEnd); - - Token colon; - if (!readToken(colon) || colon.type_ != tokenMemberSeparator) { - return addErrorAndRecover("Missing ':' after object member name", colon, tokenObjectEnd); - } - Value &value = currentValue()[name]; - nodes_.push(&value); - bool ok = readValue(); - nodes_.pop(); - if (!ok) // error already set - return recoverFromError(tokenObjectEnd); - - Token comma; - if (!readToken(comma) || - (comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator && comma.type_ != tokenComment)) { - return addErrorAndRecover("Missing ',' or '}' in object declaration", comma, tokenObjectEnd); - } - bool finalizeTokenOk = true; - while (comma.type_ == tokenComment && finalizeTokenOk) finalizeTokenOk = readToken(comma); - if (comma.type_ == tokenObjectEnd) return true; - } - return addErrorAndRecover("Missing '}' or object member name", tokenName, tokenObjectEnd); -} - -bool Reader::readArray(Token & /*tokenStart*/) { - currentValue() = Value(arrayValue); - skipSpaces(); - if (*current_ == ']') // empty array - { - Token endArray; - readToken(endArray); - return true; - } - int index = 0; - for (;;) { - Value &value = currentValue()[index++]; - nodes_.push(&value); - bool ok = readValue(); - nodes_.pop(); - if (!ok) // error already set - return recoverFromError(tokenArrayEnd); - - Token token; - // Accept Comment after last item in the array. 
- ok = readToken(token); - while (token.type_ == tokenComment && ok) { - ok = readToken(token); - } - bool badTokenType = (token.type_ != tokenArraySeparator && token.type_ != tokenArrayEnd); - if (!ok || badTokenType) { - return addErrorAndRecover("Missing ',' or ']' in array declaration", token, tokenArrayEnd); - } - if (token.type_ == tokenArrayEnd) break; - } - return true; -} - -bool Reader::decodeNumber(Token &token) { - bool isDouble = false; - for (Location inspect = token.start_; inspect != token.end_; ++inspect) { - isDouble = isDouble || in(*inspect, '.', 'e', 'E', '+') || (*inspect == '-' && inspect != token.start_); - } - if (isDouble) return decodeDouble(token); - // Attempts to parse the number as an integer. If the number is - // larger than the maximum supported value of an integer then - // we decode the number as a double. - Location current = token.start_; - bool isNegative = *current == '-'; - if (isNegative) ++current; - Value::LargestUInt maxIntegerValue = isNegative ? Value::LargestUInt(-Value::minLargestInt) : Value::maxLargestUInt; - Value::LargestUInt threshold = maxIntegerValue / 10; - Value::UInt lastDigitThreshold = Value::UInt(maxIntegerValue % 10); - assert(lastDigitThreshold >= 0 && lastDigitThreshold <= 9); - Value::LargestUInt value = 0; - while (current < token.end_) { - Char c = *current++; - if (c < '0' || c > '9') return addError("'" + std::string(token.start_, token.end_) + "' is not a number.", token); - Value::UInt digit(c - '0'); - if (value >= threshold) { - // If the current digit is not the last one, or if it is - // greater than the last digit of the maximum integer value, - // the parse the number as a double. 
- if (current != token.end_ || digit > lastDigitThreshold) { - return decodeDouble(token); - } - } - value = value * 10 + digit; - } - if (isNegative) - currentValue() = -Value::LargestInt(value); - else if (value <= Value::LargestUInt(Value::maxInt)) - currentValue() = Value::LargestInt(value); - else - currentValue() = value; - return true; -} - -bool Reader::decodeDouble(Token &token) { - double value = 0; - const int bufferSize = 32; - int count; - int length = int(token.end_ - token.start_); - if (length <= bufferSize) { - Char buffer[bufferSize + 1]; - memcpy(buffer, token.start_, length); - buffer[length] = 0; - count = sscanf(buffer, "%lf", &value); - } else { - std::string buffer(token.start_, token.end_); - count = sscanf(buffer.c_str(), "%lf", &value); - } - - if (count != 1) return addError("'" + std::string(token.start_, token.end_) + "' is not a number.", token); - currentValue() = value; - return true; -} - -bool Reader::decodeString(Token &token) { - std::string decoded; - if (!decodeString(token, decoded)) return false; - currentValue() = decoded; - return true; -} - -bool Reader::decodeString(Token &token, std::string &decoded) { - decoded.reserve(token.end_ - token.start_ - 2); - Location current = token.start_ + 1; // skip '"' - Location end = token.end_ - 1; // do not include '"' - while (current != end) { - Char c = *current++; - if (c == '"') - break; - else if (c == '\\') { - if (current == end) return addError("Empty escape sequence in string", token, current); - Char escape = *current++; - switch (escape) { - case '"': - decoded += '"'; - break; - case '/': - decoded += '/'; - break; - case '\\': - decoded += '\\'; - break; - case 'b': - decoded += '\b'; - break; - case 'f': - decoded += '\f'; - break; - case 'n': - decoded += '\n'; - break; - case 'r': - decoded += '\r'; - break; - case 't': - decoded += '\t'; - break; - case 'u': { - unsigned int unicode; - if (!decodeUnicodeCodePoint(token, current, end, unicode)) return false; - 
decoded += codePointToUTF8(unicode); - } break; - default: - return addError("Bad escape sequence in string", token, current); - } - } else { - decoded += c; - } - } - return true; -} - -bool Reader::decodeUnicodeCodePoint(Token &token, Location ¤t, Location end, unsigned int &unicode) { - if (!decodeUnicodeEscapeSequence(token, current, end, unicode)) return false; - if (unicode >= 0xD800 && unicode <= 0xDBFF) { - // surrogate pairs - if (end - current < 6) - return addError("additional six characters expected to parse unicode surrogate pair.", token, current); - unsigned int surrogatePair; - if (*(current++) == '\\' && *(current++) == 'u') { - if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) { - unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF); - } else - return false; - } else - return addError( - "expecting another \\u token to begin the second half of a unicode " - "surrogate pair", - token, current); - } - return true; -} - -bool Reader::decodeUnicodeEscapeSequence(Token &token, Location ¤t, Location end, unsigned int &unicode) { - if (end - current < 4) - return addError("Bad unicode escape sequence in string: four digits expected.", token, current); - unicode = 0; - for (int index = 0; index < 4; ++index) { - Char c = *current++; - unicode *= 16; - if (c >= '0' && c <= '9') - unicode += c - '0'; - else if (c >= 'a' && c <= 'f') - unicode += c - 'a' + 10; - else if (c >= 'A' && c <= 'F') - unicode += c - 'A' + 10; - else - return addError("Bad unicode escape sequence in string: hexadecimal digit expected.", token, current); - } - return true; -} - -bool Reader::addError(const std::string &message, Token &token, Location extra) { - ErrorInfo info; - info.token_ = token; - info.message_ = message; - info.extra_ = extra; - errors_.push_back(info); - return false; -} - -bool Reader::recoverFromError(TokenType skipUntilToken) { - int errorCount = int(errors_.size()); - Token skip; - for (;;) { - if (!readToken(skip)) 
errors_.resize(errorCount); // discard errors caused by recovery - if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream) break; - } - errors_.resize(errorCount); - return false; -} - -bool Reader::addErrorAndRecover(const std::string &message, Token &token, TokenType skipUntilToken) { - addError(message, token); - return recoverFromError(skipUntilToken); -} - -Value &Reader::currentValue() { return *(nodes_.top()); } - -Reader::Char Reader::getNextChar() { - if (current_ == end_) return 0; - return *current_++; -} - -void Reader::getLocationLineAndColumn(Location location, int &line, int &column) const { - Location current = begin_; - Location lastLineStart = current; - line = 0; - while (current < location && current != end_) { - Char c = *current++; - if (c == '\r') { - if (*current == '\n') ++current; - lastLineStart = current; - ++line; - } else if (c == '\n') { - lastLineStart = current; - ++line; - } - } - // column & line start at 1 - column = int(location - lastLineStart) + 1; - ++line; -} - -std::string Reader::getLocationLineAndColumn(Location location) const { - int line, column; - getLocationLineAndColumn(location, line, column); - char buffer[18 + 16 + 16 + 1]; - sprintf(buffer, "Line %d, Column %d", line, column); - return buffer; -} - -// Deprecated. 
Preserved for backward compatibility -std::string Reader::getFormatedErrorMessages() const { return getFormattedErrorMessages(); } - -std::string Reader::getFormattedErrorMessages() const { - std::string formattedMessage; - for (Errors::const_iterator itError = errors_.begin(); itError != errors_.end(); ++itError) { - const ErrorInfo &error = *itError; - formattedMessage += "* " + getLocationLineAndColumn(error.token_.start_) + "\n"; - formattedMessage += " " + error.message_ + "\n"; - if (error.extra_) formattedMessage += "See " + getLocationLineAndColumn(error.extra_) + " for detail.\n"; - } - return formattedMessage; -} - -std::istream &operator>>(std::istream &sin, Value &root) { - Json::Reader reader; - bool ok = reader.parse(sin, root, true); - // JSON_ASSERT( ok ); - if (!ok) throw std::runtime_error(reader.getFormattedErrorMessages()); - return sin; -} - -} // namespace Json - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_reader.cpp -// ////////////////////////////////////////////////////////////////////// - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_batchallocator.h -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. -// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED -#define JSONCPP_BATCHALLOCATOR_H_INCLUDED - -#include -#include - -#ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION - -namespace Json { - -/* Fast memory allocator. - * - * This memory allocator allocates memory for a batch of object (specified by - * the page size, the number of object in each page). - * - * It does not allow the destruction of a single object. 
All the allocated - * objects - * can be destroyed at once. The memory can be either released or reused for - * future - * allocation. - * - * The in-place new operator must be used to construct the object using the - * pointer - * returned by allocate. - */ -template -class BatchAllocator { - public: - typedef AllocatedType Type; - - BatchAllocator(unsigned int objectsPerPage = 255) : freeHead_(0), objectsPerPage_(objectsPerPage) { - // printf( "Size: %d => %s\n", sizeof(AllocatedType), - // typeid(AllocatedType).name() ); - assert(sizeof(AllocatedType) * objectPerAllocation >= - sizeof(AllocatedType *)); // We must be able to store a slist in the - // object free space. - assert(objectsPerPage >= 16); - batches_ = allocateBatch(0); // allocated a dummy page - currentBatch_ = batches_; - } - - ~BatchAllocator() { - for (BatchInfo *batch = batches_; batch;) { - BatchInfo *nextBatch = batch->next_; - free(batch); - batch = nextBatch; - } - } - - /// allocate space for an array of objectPerAllocation object. - /// @warning it is the responsability of the caller to call objects - /// constructors. - AllocatedType *allocate() { - if (freeHead_) // returns node from free list. - { - AllocatedType *object = freeHead_; - freeHead_ = *(AllocatedType **)object; - return object; - } - if (currentBatch_->used_ == currentBatch_->end_) { - currentBatch_ = currentBatch_->next_; - while (currentBatch_ && currentBatch_->used_ == currentBatch_->end_) currentBatch_ = currentBatch_->next_; - - if (!currentBatch_) // no free batch found, allocate a new one - { - currentBatch_ = allocateBatch(objectsPerPage_); - currentBatch_->next_ = batches_; // insert at the head of the list - batches_ = currentBatch_; - } - } - AllocatedType *allocated = currentBatch_->used_; - currentBatch_->used_ += objectPerAllocation; - return allocated; - } - - /// Release the object. - /// @warning it is the responsability of the caller to actually destruct the - /// object. 
- void release(AllocatedType *object) { - assert(object != 0); - *(AllocatedType **)object = freeHead_; - freeHead_ = object; - } - - private: - struct BatchInfo { - BatchInfo *next_; - AllocatedType *used_; - AllocatedType *end_; - AllocatedType buffer_[objectPerAllocation]; - }; - - // disabled copy constructor and assignement operator. - BatchAllocator(const BatchAllocator &); - void operator=(const BatchAllocator &); - - static BatchInfo *allocateBatch(unsigned int objectsPerPage) { - const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType) * objectPerAllocation + - sizeof(AllocatedType) * objectPerAllocation * objectsPerPage; - BatchInfo *batch = static_cast(malloc(mallocSize)); - batch->next_ = 0; - batch->used_ = batch->buffer_; - batch->end_ = batch->buffer_ + objectsPerPage; - return batch; - } - - BatchInfo *batches_; - BatchInfo *currentBatch_; - /// Head of a single linked list within the allocated space of freeed object - AllocatedType *freeHead_; - unsigned int objectsPerPage_; -}; - -} // namespace Json - -#endif // ifndef JSONCPP_DOC_INCLUDE_IMPLEMENTATION - -#endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_batchallocator.h -// ////////////////////////////////////////////////////////////////////// - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_valueiterator.inl -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -// included by json_value.cpp - -namespace Json { - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class ValueIteratorBase -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// - -ValueIteratorBase::ValueIteratorBase() -#ifndef JSON_VALUE_USE_INTERNAL_MAP - : current_(), isNull_(true) { -} -#else - : isArray_(true), isNull_(true) { - iterator_.array_ = ValueInternalArray::IteratorState(); -} -#endif - -#ifndef JSON_VALUE_USE_INTERNAL_MAP -ValueIteratorBase::ValueIteratorBase(const Value::ObjectValues::iterator ¤t) - : current_(current), isNull_(false) {} -#else -ValueIteratorBase::ValueIteratorBase(const ValueInternalArray::IteratorState &state) : isArray_(true) { - iterator_.array_ = state; -} - -ValueIteratorBase::ValueIteratorBase(const ValueInternalMap::IteratorState &state) : isArray_(false) { - iterator_.map_ = state; -} -#endif - -Value &ValueIteratorBase::deref() const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - return current_->second; -#else - if (isArray_) return ValueInternalArray::dereference(iterator_.array_); - return ValueInternalMap::value(iterator_.map_); -#endif -} - -void ValueIteratorBase::increment() { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - ++current_; -#else - if (isArray_) ValueInternalArray::increment(iterator_.array_); - ValueInternalMap::increment(iterator_.map_); -#endif -} - -void ValueIteratorBase::decrement() { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - --current_; -#else - if (isArray_) ValueInternalArray::decrement(iterator_.array_); - ValueInternalMap::decrement(iterator_.map_); -#endif -} - -ValueIteratorBase::difference_type 
ValueIteratorBase::computeDistance(const SelfType &other) const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP -#ifdef JSON_USE_CPPTL_SMALLMAP - return current_ - other.current_; -#else - // Iterator for null value are initialized using the default - // constructor, which initialize current_ to the default - // std::map::iterator. As begin() and end() are two instance - // of the default std::map::iterator, they can not be compared. - // To allow this, we handle this comparison specifically. - if (isNull_ && other.isNull_) { - return 0; - } - - // Usage of std::distance is not portable (does not compile with Sun Studio 12 - // RogueWave STL, - // which is the one used by default). - // Using a portable hand-made version for non random iterator instead: - // return difference_type( std::distance( current_, other.current_ ) ); - difference_type myDistance = 0; - for (Value::ObjectValues::iterator it = current_; it != other.current_; ++it) { - ++myDistance; - } - return myDistance; -#endif -#else - if (isArray_) return ValueInternalArray::distance(iterator_.array_, other.iterator_.array_); - return ValueInternalMap::distance(iterator_.map_, other.iterator_.map_); -#endif -} - -bool ValueIteratorBase::isEqual(const SelfType &other) const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - if (isNull_) { - return other.isNull_; - } - return current_ == other.current_; -#else - if (isArray_) return ValueInternalArray::equals(iterator_.array_, other.iterator_.array_); - return ValueInternalMap::equals(iterator_.map_, other.iterator_.map_); -#endif -} - -void ValueIteratorBase::copy(const SelfType &other) { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - current_ = other.current_; -#else - if (isArray_) iterator_.array_ = other.iterator_.array_; - iterator_.map_ = other.iterator_.map_; -#endif -} - -Value ValueIteratorBase::key() const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - const Value::CZString czstring = (*current_).first; - if (czstring.c_str()) { - if (czstring.isStaticString()) return 
Value(StaticString(czstring.c_str())); - return Value(czstring.c_str()); - } - return Value(czstring.index()); -#else - if (isArray_) return Value(ValueInternalArray::indexOf(iterator_.array_)); - bool isStatic; - const char *memberName = ValueInternalMap::key(iterator_.map_, isStatic); - if (isStatic) return Value(StaticString(memberName)); - return Value(memberName); -#endif -} - -UInt ValueIteratorBase::index() const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - const Value::CZString czstring = (*current_).first; - if (!czstring.c_str()) return czstring.index(); - return Value::UInt(-1); -#else - if (isArray_) return Value::UInt(ValueInternalArray::indexOf(iterator_.array_)); - return Value::UInt(-1); -#endif -} - -const char *ValueIteratorBase::memberName() const { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - const char *name = (*current_).first.c_str(); - return name ? name : ""; -#else - if (!isArray_) return ValueInternalMap::key(iterator_.map_); - return ""; -#endif -} - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class ValueConstIterator -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// - -ValueConstIterator::ValueConstIterator() {} - -#ifndef JSON_VALUE_USE_INTERNAL_MAP -ValueConstIterator::ValueConstIterator(const Value::ObjectValues::iterator ¤t) : ValueIteratorBase(current) {} -#else -ValueConstIterator::ValueConstIterator(const ValueInternalArray::IteratorState &state) : ValueIteratorBase(state) {} - -ValueConstIterator::ValueConstIterator(const ValueInternalMap::IteratorState &state) : ValueIteratorBase(state) {} -#endif - -ValueConstIterator &ValueConstIterator::operator=(const ValueIteratorBase &other) { - copy(other); - return 
*this; -} - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class ValueIterator -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// - -ValueIterator::ValueIterator() {} - -#ifndef JSON_VALUE_USE_INTERNAL_MAP -ValueIterator::ValueIterator(const Value::ObjectValues::iterator ¤t) : ValueIteratorBase(current) {} -#else -ValueIterator::ValueIterator(const ValueInternalArray::IteratorState &state) : ValueIteratorBase(state) {} - -ValueIterator::ValueIterator(const ValueInternalMap::IteratorState &state) : ValueIteratorBase(state) {} -#endif - -ValueIterator::ValueIterator(const ValueConstIterator &other) : ValueIteratorBase(other) {} - -ValueIterator::ValueIterator(const ValueIterator &other) : ValueIteratorBase(other) {} - -ValueIterator &ValueIterator::operator=(const SelfType &other) { - copy(other); - return *this; -} - -} // namespace Json - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_valueiterator.inl -// ////////////////////////////////////////////////////////////////////// - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_value.cpp -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#if !defined(JSON_IS_AMALGAMATION) -#include -#include -#ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR -#include "json_batchallocator.h" -#endif // #ifndef JSON_USE_SIMPLE_INTERNAL_ALLOCATOR -#endif // if !defined(JSON_IS_AMALGAMATION) -#include -#include -#include -#include -#include -#ifdef JSON_USE_CPPTL -#include -#endif -#include // size_t - -#define JSON_ASSERT_UNREACHABLE assert(false) -#define JSON_ASSERT(condition) assert(condition); // @todo <= change this into an exception throw -#define JSON_FAIL_MESSAGE(message) throw std::runtime_error(message); -#define JSON_ASSERT_MESSAGE(condition, message) \ - if (!(condition)) JSON_FAIL_MESSAGE(message) - -namespace Json { - -const Value Value::null; -const Int Value::minInt = Int(~(UInt(-1) / 2)); -const Int Value::maxInt = Int(UInt(-1) / 2); -const UInt Value::maxUInt = UInt(-1); -const Int64 Value::minInt64 = Int64(~(UInt64(-1) / 2)); -const Int64 Value::maxInt64 = Int64(UInt64(-1) / 2); -const UInt64 Value::maxUInt64 = UInt64(-1); -const LargestInt Value::minLargestInt = LargestInt(~(LargestUInt(-1) / 2)); -const LargestInt Value::maxLargestInt = LargestInt(LargestUInt(-1) / 2); -const LargestUInt Value::maxLargestUInt = LargestUInt(-1); - -/// Unknown size marker -static const unsigned int unknown = (unsigned)-1; - -/** Duplicates the specified string value. - * @param value Pointer to the string to duplicate. Must be zero-terminated if - * length is "unknown". - * @param length Length of the value. if equals to unknown, then it will be - * computed using strlen(value). - * @return Pointer on the duplicate instance of string. 
- */ -static inline char *duplicateStringValue(const char *value, unsigned int length = unknown) { - if (length == unknown) length = (unsigned int)strlen(value); - char *newString = static_cast(malloc(length + 1)); - JSON_ASSERT_MESSAGE(newString != 0, "Failed to allocate string value buffer"); - memcpy(newString, value, length); - newString[length] = 0; - return newString; -} - -/** Free the string duplicated by duplicateStringValue(). - */ -static inline void releaseStringValue(char *value) { - if (value) free(value); -} - -} // namespace Json - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ValueInternals... -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -#if !defined(JSON_IS_AMALGAMATION) -#ifdef JSON_VALUE_USE_INTERNAL_MAP -#include "json_internalarray.inl" -#include "json_internalmap.inl" -#endif // JSON_VALUE_USE_INTERNAL_MAP - -#include "json_valueiterator.inl" -#endif // if !defined(JSON_IS_AMALGAMATION) - -namespace Json { - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class Value::CommentInfo -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// - -Value::CommentInfo::CommentInfo() : comment_(0) {} - -Value::CommentInfo::~CommentInfo() { - if (comment_) releaseStringValue(comment_); -} - -void Value::CommentInfo::setComment(const char *text) { - if (comment_) releaseStringValue(comment_); - 
JSON_ASSERT(text != 0); - JSON_ASSERT_MESSAGE(text[0] == '\0' || text[0] == '/', "Comments must start with /"); - // It seems that /**/ style comments are acceptable as well. - comment_ = duplicateStringValue(text); -} - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class Value::CZString -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -#ifndef JSON_VALUE_USE_INTERNAL_MAP - -// Notes: index_ indicates if the string was allocated when -// a string is stored. - -Value::CZString::CZString(ArrayIndex index) : cstr_(0), index_(index) {} - -Value::CZString::CZString(const char *cstr, DuplicationPolicy allocate) - : cstr_(allocate == duplicate ? duplicateStringValue(cstr) : cstr), index_(allocate) {} - -Value::CZString::CZString(const CZString &other) - : cstr_(other.index_ != noDuplication && other.cstr_ != 0 ? duplicateStringValue(other.cstr_) : other.cstr_), - index_(other.cstr_ ? (other.index_ == noDuplication ? 
noDuplication : duplicate) : other.index_) {} - -Value::CZString::~CZString() { - if (cstr_ && index_ == duplicate) releaseStringValue(const_cast(cstr_)); -} - -void Value::CZString::swap(CZString &other) { - std::swap(cstr_, other.cstr_); - std::swap(index_, other.index_); -} - -Value::CZString &Value::CZString::operator=(const CZString &other) { - CZString temp(other); - swap(temp); - return *this; -} - -bool Value::CZString::operator<(const CZString &other) const { - if (cstr_) return strcmp(cstr_, other.cstr_) < 0; - return index_ < other.index_; -} - -bool Value::CZString::operator==(const CZString &other) const { - if (cstr_) return strcmp(cstr_, other.cstr_) == 0; - return index_ == other.index_; -} - -ArrayIndex Value::CZString::index() const { return index_; } - -const char *Value::CZString::c_str() const { return cstr_; } - -bool Value::CZString::isStaticString() const { return index_ == noDuplication; } - -#endif // ifndef JSON_VALUE_USE_INTERNAL_MAP - -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// class Value::Value -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// -// ////////////////////////////////////////////////////////////////// - -/*! \internal Default constructor initialization must be equivalent to: - * memset( this, 0, sizeof(Value) ) - * This optimization is used in ValueInternalMap fast allocator. 
- */ -Value::Value(ValueType type) - : type_(type), - allocated_(0), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - switch (type) { - case nullValue: - break; - case intValue: - case uintValue: - value_.int_ = 0; - break; - case realValue: - value_.real_ = 0.0; - break; - case stringValue: - value_.string_ = 0; - break; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: - value_.map_ = new ObjectValues(); - break; -#else - case arrayValue: - value_.array_ = arrayAllocator()->newArray(); - break; - case objectValue: - value_.map_ = mapAllocator()->newMap(); - break; -#endif - case booleanValue: - value_.bool_ = false; - break; - default: - JSON_ASSERT_UNREACHABLE; - } -} - -#if defined(JSON_HAS_INT64) -Value::Value(UInt value) - : type_(uintValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.uint_ = value; -} - -Value::Value(Int value) - : type_(intValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.int_ = value; -} - -#endif // if defined(JSON_HAS_INT64) - -Value::Value(Int64 value) - : type_(intValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.int_ = value; -} - -Value::Value(UInt64 value) - : type_(uintValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.uint_ = value; -} - -Value::Value(double value) - : type_(realValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.real_ = value; -} - -Value::Value(const char *value) - : type_(stringValue), - allocated_(true), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.string_ = duplicateStringValue(value); -} - -Value::Value(const char *beginValue, const char *endValue) - : type_(stringValue), - allocated_(true), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - 
itemIsUsed_(0) -#endif -{ - value_.string_ = duplicateStringValue(beginValue, (unsigned int)(endValue - beginValue)); -} - -Value::Value(const std::string &value) - : type_(stringValue), - allocated_(true), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.string_ = duplicateStringValue(value.c_str(), (unsigned int)value.length()); -} - -Value::Value(const StaticString &value) - : type_(stringValue), - allocated_(false), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.string_ = const_cast(value.c_str()); -} - -#ifdef JSON_USE_CPPTL -Value::Value(const CppTL::ConstString &value) - : type_(stringValue), - allocated_(true), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.string_ = duplicateStringValue(value, value.length()); -} -#endif - -Value::Value(bool value) - : type_(booleanValue), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - value_.bool_ = value; -} - -Value::Value(const Value &other) - : type_(other.type_), - comments_(0) -#ifdef JSON_VALUE_USE_INTERNAL_MAP - , - itemIsUsed_(0) -#endif -{ - switch (type_) { - case nullValue: - case intValue: - case uintValue: - case realValue: - case booleanValue: - value_ = other.value_; - break; - case stringValue: - if (other.value_.string_) { - value_.string_ = duplicateStringValue(other.value_.string_); - allocated_ = true; - } else - value_.string_ = 0; - break; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: - value_.map_ = new ObjectValues(*other.value_.map_); - break; -#else - case arrayValue: - value_.array_ = arrayAllocator()->newArrayCopy(*other.value_.array_); - break; - case objectValue: - value_.map_ = mapAllocator()->newMapCopy(*other.value_.map_); - break; -#endif - default: - JSON_ASSERT_UNREACHABLE; - } - if (other.comments_) { - comments_ = new CommentInfo[numberOfCommentPlacement]; - for (int comment = 0; 
comment < numberOfCommentPlacement; ++comment) { - const CommentInfo &otherComment = other.comments_[comment]; - if (otherComment.comment_) comments_[comment].setComment(otherComment.comment_); - } - } -} - -Value::~Value() { - switch (type_) { - case nullValue: - case intValue: - case uintValue: - case realValue: - case booleanValue: - break; - case stringValue: - if (allocated_) releaseStringValue(value_.string_); - break; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: - delete value_.map_; - break; -#else - case arrayValue: - arrayAllocator()->destructArray(value_.array_); - break; - case objectValue: - mapAllocator()->destructMap(value_.map_); - break; -#endif - default: - JSON_ASSERT_UNREACHABLE; - } - - if (comments_) delete[] comments_; -} - -Value &Value::operator=(const Value &other) { - Value temp(other); - swap(temp); - return *this; -} - -void Value::swap(Value &other) { - ValueType temp = type_; - type_ = other.type_; - other.type_ = temp; - std::swap(value_, other.value_); - int temp2 = allocated_; - allocated_ = other.allocated_; - other.allocated_ = temp2; -} - -ValueType Value::type() const { return type_; } - -int Value::compare(const Value &other) const { - if (*this < other) return -1; - if (*this > other) return 1; - return 0; -} - -bool Value::operator<(const Value &other) const { - int typeDelta = type_ - other.type_; - if (typeDelta) return typeDelta < 0 ? 
true : false; - switch (type_) { - case nullValue: - return false; - case intValue: - return value_.int_ < other.value_.int_; - case uintValue: - return value_.uint_ < other.value_.uint_; - case realValue: - return value_.real_ < other.value_.real_; - case booleanValue: - return value_.bool_ < other.value_.bool_; - case stringValue: - return (value_.string_ == 0 && other.value_.string_) || - (other.value_.string_ && value_.string_ && strcmp(value_.string_, other.value_.string_) < 0); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: { - int delta = int(value_.map_->size() - other.value_.map_->size()); - if (delta) return delta < 0; - return (*value_.map_) < (*other.value_.map_); - } -#else - case arrayValue: - return value_.array_->compare(*(other.value_.array_)) < 0; - case objectValue: - return value_.map_->compare(*(other.value_.map_)) < 0; -#endif - default: - JSON_ASSERT_UNREACHABLE; - } - return false; // unreachable -} - -bool Value::operator<=(const Value &other) const { return !(other < *this); } - -bool Value::operator>=(const Value &other) const { return !(*this < other); } - -bool Value::operator>(const Value &other) const { return other < *this; } - -bool Value::operator==(const Value &other) const { - // if ( type_ != other.type_ ) - // GCC 2.95.3 says: - // attempt to take address of bit-field structure member `Json::Value::type_' - // Beats me, but a temp solves the problem. 
- int temp = other.type_; - if (type_ != temp) return false; - switch (type_) { - case nullValue: - return true; - case intValue: - return value_.int_ == other.value_.int_; - case uintValue: - return value_.uint_ == other.value_.uint_; - case realValue: - return value_.real_ == other.value_.real_; - case booleanValue: - return value_.bool_ == other.value_.bool_; - case stringValue: - return (value_.string_ == other.value_.string_) || - (other.value_.string_ && value_.string_ && strcmp(value_.string_, other.value_.string_) == 0); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: - return value_.map_->size() == other.value_.map_->size() && (*value_.map_) == (*other.value_.map_); -#else - case arrayValue: - return value_.array_->compare(*(other.value_.array_)) == 0; - case objectValue: - return value_.map_->compare(*(other.value_.map_)) == 0; -#endif - default: - JSON_ASSERT_UNREACHABLE; - } - return false; // unreachable -} - -bool Value::operator!=(const Value &other) const { return !(*this == other); } - -const char *Value::asCString() const { - JSON_ASSERT(type_ == stringValue); - return value_.string_; -} - -std::string Value::asString() const { - switch (type_) { - case nullValue: - return ""; - case stringValue: - return value_.string_ ? value_.string_ : ""; - case booleanValue: - return value_.bool_ ? 
"true" : "false"; - case intValue: - case uintValue: - case realValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to string"); - default: - JSON_ASSERT_UNREACHABLE; - } - return ""; // unreachable -} - -#ifdef JSON_USE_CPPTL -CppTL::ConstString Value::asConstString() const { return CppTL::ConstString(asString().c_str()); } -#endif - -Value::Int Value::asInt() const { - switch (type_) { - case nullValue: - return 0; - case intValue: - JSON_ASSERT_MESSAGE(value_.int_ >= minInt && value_.int_ <= maxInt, "unsigned integer out of signed int range"); - return Int(value_.int_); - case uintValue: - JSON_ASSERT_MESSAGE(value_.uint_ <= UInt(maxInt), "unsigned integer out of signed int range"); - return Int(value_.uint_); - case realValue: - JSON_ASSERT_MESSAGE(value_.real_ >= minInt && value_.real_ <= maxInt, "Real out of signed integer range"); - return Int(value_.real_); - case booleanValue: - return value_.bool_ ? 1 : 0; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to int"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} - -Value::UInt Value::asUInt() const { - switch (type_) { - case nullValue: - return 0; - case intValue: - JSON_ASSERT_MESSAGE(value_.int_ >= 0, "Negative integer can not be converted to unsigned integer"); - JSON_ASSERT_MESSAGE(value_.int_ <= maxUInt, "signed integer out of UInt range"); - return UInt(value_.int_); - case uintValue: - JSON_ASSERT_MESSAGE(value_.uint_ <= maxUInt, "unsigned integer out of UInt range"); - return UInt(value_.uint_); - case realValue: - JSON_ASSERT_MESSAGE(value_.real_ >= 0 && value_.real_ <= maxUInt, "Real out of unsigned integer range"); - return UInt(value_.real_); - case booleanValue: - return value_.bool_ ? 
1 : 0; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to uint"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} - -#if defined(JSON_HAS_INT64) - -Value::Int64 Value::asInt64() const { - switch (type_) { - case nullValue: - return 0; - case intValue: - return value_.int_; - case uintValue: - JSON_ASSERT_MESSAGE(value_.uint_ <= UInt64(maxInt64), "unsigned integer out of Int64 range"); - return value_.uint_; - case realValue: - JSON_ASSERT_MESSAGE(value_.real_ >= minInt64 && value_.real_ <= maxInt64, "Real out of Int64 range"); - return Int(value_.real_); - case booleanValue: - return value_.bool_ ? 1 : 0; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to Int64"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} - -Value::UInt64 Value::asUInt64() const { - switch (type_) { - case nullValue: - return 0; - case intValue: - JSON_ASSERT_MESSAGE(value_.int_ >= 0, "Negative integer can not be converted to UInt64"); - return value_.int_; - case uintValue: - return value_.uint_; - case realValue: - JSON_ASSERT_MESSAGE(value_.real_ >= 0 && value_.real_ <= maxUInt64, "Real out of UInt64 range"); - return UInt(value_.real_); - case booleanValue: - return value_.bool_ ? 
1 : 0; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to UInt64"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} -#endif // if defined(JSON_HAS_INT64) - -LargestInt Value::asLargestInt() const { -#if defined(JSON_NO_INT64) - return asInt(); -#else - return asInt64(); -#endif -} - -LargestUInt Value::asLargestUInt() const { -#if defined(JSON_NO_INT64) - return asUInt(); -#else - return asUInt64(); -#endif -} - -double Value::asDouble() const { - switch (type_) { - case nullValue: - return 0.0; - case intValue: - return static_cast(value_.int_); - case uintValue: -#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - return static_cast(value_.uint_); -#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - return static_cast(Int(value_.uint_ / 2)) * 2 + Int(value_.uint_ & 1); -#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - case realValue: - return value_.real_; - case booleanValue: - return value_.bool_ ? 1.0 : 0.0; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to double"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} - -float Value::asFloat() const { - switch (type_) { - case nullValue: - return 0.0f; - case intValue: - return static_cast(value_.int_); - case uintValue: -#if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - return static_cast(value_.uint_); -#else // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - return static_cast(Int(value_.uint_ / 2)) * 2 + Int(value_.uint_ & 1); -#endif // if !defined(JSON_USE_INT64_DOUBLE_CONVERSION) - case realValue: - return static_cast(value_.real_); - case booleanValue: - return value_.bool_ ? 
1.0f : 0.0f; - case stringValue: - case arrayValue: - case objectValue: - JSON_FAIL_MESSAGE("Type is not convertible to float"); - default: - JSON_ASSERT_UNREACHABLE; - } - return 0.0f; // unreachable; -} - -bool Value::asBool() const { - switch (type_) { - case nullValue: - return false; - case intValue: - case uintValue: - return value_.int_ != 0; - case realValue: - return value_.real_ != 0.0; - case booleanValue: - return value_.bool_; - case stringValue: - return value_.string_ && value_.string_[0] != 0; - case arrayValue: - case objectValue: - return value_.map_->size() != 0; - default: - JSON_ASSERT_UNREACHABLE; - } - return false; // unreachable; -} - -bool Value::isConvertibleTo(ValueType other) const { - switch (type_) { - case nullValue: - return true; - case intValue: - return (other == nullValue && value_.int_ == 0) || other == intValue || - (other == uintValue && value_.int_ >= 0) || other == realValue || other == stringValue || - other == booleanValue; - case uintValue: - return (other == nullValue && value_.uint_ == 0) || (other == intValue && value_.uint_ <= (unsigned)maxInt) || - other == uintValue || other == realValue || other == stringValue || other == booleanValue; - case realValue: - return (other == nullValue && value_.real_ == 0.0) || - (other == intValue && value_.real_ >= minInt && value_.real_ <= maxInt) || - (other == uintValue && value_.real_ >= 0 && value_.real_ <= maxUInt) || other == realValue || - other == stringValue || other == booleanValue; - case booleanValue: - return (other == nullValue && value_.bool_ == false) || other == intValue || other == uintValue || - other == realValue || other == stringValue || other == booleanValue; - case stringValue: - return other == stringValue || (other == nullValue && (!value_.string_ || value_.string_[0] == 0)); - case arrayValue: - return other == arrayValue || (other == nullValue && value_.map_->size() == 0); - case objectValue: - return other == objectValue || (other == nullValue && 
value_.map_->size() == 0); - default: - JSON_ASSERT_UNREACHABLE; - } - return false; // unreachable; -} - -/// Number of values in array or object -ArrayIndex Value::size() const { - switch (type_) { - case nullValue: - case intValue: - case uintValue: - case realValue: - case booleanValue: - case stringValue: - return 0; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: // size of the array is highest index + 1 - if (!value_.map_->empty()) { - ObjectValues::const_iterator itLast = value_.map_->end(); - --itLast; - return (*itLast).first.index() + 1; - } - return 0; - case objectValue: - return ArrayIndex(value_.map_->size()); -#else - case arrayValue: - return Int(value_.array_->size()); - case objectValue: - return Int(value_.map_->size()); -#endif - default: - JSON_ASSERT_UNREACHABLE; - } - return 0; // unreachable; -} - -bool Value::empty() const { - if (isNull() || isArray() || isObject()) - return size() == 0u; - else - return false; -} - -bool Value::operator!() const { return isNull(); } - -void Value::clear() { - JSON_ASSERT(type_ == nullValue || type_ == arrayValue || type_ == objectValue); - - switch (type_) { -#ifndef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - case objectValue: - value_.map_->clear(); - break; -#else - case arrayValue: - value_.array_->clear(); - break; - case objectValue: - value_.map_->clear(); - break; -#endif - default: - break; - } -} - -void Value::resize(ArrayIndex newSize) { - JSON_ASSERT(type_ == nullValue || type_ == arrayValue); - if (type_ == nullValue) *this = Value(arrayValue); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - ArrayIndex oldSize = size(); - if (newSize == 0) - clear(); - else if (newSize > oldSize) - (*this)[newSize - 1]; - else { - for (ArrayIndex index = newSize; index < oldSize; ++index) { - value_.map_->erase(index); - } - assert(size() == newSize); - } -#else - value_.array_->resize(newSize); -#endif -} - -Value &Value::operator[](ArrayIndex index) { - JSON_ASSERT(type_ == nullValue || type_ == 
arrayValue); - if (type_ == nullValue) *this = Value(arrayValue); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - CZString key(index); - ObjectValues::iterator it = value_.map_->lower_bound(key); - if (it != value_.map_->end() && (*it).first == key) return (*it).second; - - ObjectValues::value_type defaultValue(key, null); - it = value_.map_->insert(it, defaultValue); - return (*it).second; -#else - return value_.array_->resolveReference(index); -#endif -} - -Value &Value::operator[](int index) { - JSON_ASSERT(index >= 0); - return (*this)[ArrayIndex(index)]; -} - -const Value &Value::operator[](ArrayIndex index) const { - JSON_ASSERT(type_ == nullValue || type_ == arrayValue); - if (type_ == nullValue) return null; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - CZString key(index); - ObjectValues::const_iterator it = value_.map_->find(key); - if (it == value_.map_->end()) return null; - return (*it).second; -#else - Value *value = value_.array_->find(index); - return value ? *value : null; -#endif -} - -const Value &Value::operator[](int index) const { - JSON_ASSERT(index >= 0); - return (*this)[ArrayIndex(index)]; -} - -Value &Value::operator[](const char *key) { return resolveReference(key, false); } - -Value &Value::resolveReference(const char *key, bool isStatic) { - JSON_ASSERT(type_ == nullValue || type_ == objectValue); - if (type_ == nullValue) *this = Value(objectValue); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - CZString actualKey(key, isStatic ? 
CZString::noDuplication : CZString::duplicateOnCopy); - ObjectValues::iterator it = value_.map_->lower_bound(actualKey); - if (it != value_.map_->end() && (*it).first == actualKey) return (*it).second; - - ObjectValues::value_type defaultValue(actualKey, null); - it = value_.map_->insert(it, defaultValue); - Value &value = (*it).second; - return value; -#else - return value_.map_->resolveReference(key, isStatic); -#endif -} - -Value Value::get(ArrayIndex index, const Value &defaultValue) const { - const Value *value = &((*this)[index]); - return value == &null ? defaultValue : *value; -} - -bool Value::isValidIndex(ArrayIndex index) const { return index < size(); } - -const Value &Value::operator[](const char *key) const { - JSON_ASSERT(type_ == nullValue || type_ == objectValue); - if (type_ == nullValue) return null; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - CZString actualKey(key, CZString::noDuplication); - ObjectValues::const_iterator it = value_.map_->find(actualKey); - if (it == value_.map_->end()) return null; - return (*it).second; -#else - const Value *value = value_.map_->find(key); - return value ? *value : null; -#endif -} - -Value &Value::operator[](const std::string &key) { return (*this)[key.c_str()]; } - -const Value &Value::operator[](const std::string &key) const { return (*this)[key.c_str()]; } - -Value &Value::operator[](const StaticString &key) { return resolveReference(key, true); } - -#ifdef JSON_USE_CPPTL -Value &Value::operator[](const CppTL::ConstString &key) { return (*this)[key.c_str()]; } - -const Value &Value::operator[](const CppTL::ConstString &key) const { return (*this)[key.c_str()]; } -#endif - -Value &Value::append(const Value &value) { return (*this)[size()] = value; } - -Value Value::get(const char *key, const Value &defaultValue) const { - const Value *value = &((*this)[key]); - return value == &null ? 
defaultValue : *value; -} - -Value Value::get(const std::string &key, const Value &defaultValue) const { return get(key.c_str(), defaultValue); } - -Value Value::removeMember(const char *key) { - JSON_ASSERT(type_ == nullValue || type_ == objectValue); - if (type_ == nullValue) return null; -#ifndef JSON_VALUE_USE_INTERNAL_MAP - CZString actualKey(key, CZString::noDuplication); - ObjectValues::iterator it = value_.map_->find(actualKey); - if (it == value_.map_->end()) return null; - Value old(it->second); - value_.map_->erase(it); - return old; -#else - Value *value = value_.map_->find(key); - if (value) { - Value old(*value); - value_.map_.remove(key); - return old; - } else { - return null; - } -#endif -} - -Value Value::removeMember(const std::string &key) { return removeMember(key.c_str()); } - -#ifdef JSON_USE_CPPTL -Value Value::get(const CppTL::ConstString &key, const Value &defaultValue) const { - return get(key.c_str(), defaultValue); -} -#endif - -bool Value::isMember(const char *key) const { - const Value *value = &((*this)[key]); - return value != &null; -} - -bool Value::isMember(const std::string &key) const { return isMember(key.c_str()); } - -#ifdef JSON_USE_CPPTL -bool Value::isMember(const CppTL::ConstString &key) const { return isMember(key.c_str()); } -#endif - -Value::Members Value::getMemberNames() const { - JSON_ASSERT(type_ == nullValue || type_ == objectValue); - if (type_ == nullValue) return Value::Members(); - Members members; - members.reserve(value_.map_->size()); -#ifndef JSON_VALUE_USE_INTERNAL_MAP - ObjectValues::const_iterator it = value_.map_->begin(); - ObjectValues::const_iterator itEnd = value_.map_->end(); - for (; it != itEnd; ++it) members.push_back(std::string((*it).first.c_str())); -#else - ValueInternalMap::IteratorState it; - ValueInternalMap::IteratorState itEnd; - value_.map_->makeBeginIterator(it); - value_.map_->makeEndIterator(itEnd); - for (; !ValueInternalMap::equals(it, itEnd); ValueInternalMap::increment(it)) - 
members.push_back(std::string(ValueInternalMap::key(it))); -#endif - return members; -} -// -//# ifdef JSON_USE_CPPTL -// EnumMemberNames -// Value::enumMemberNames() const -//{ -// if ( type_ == objectValue ) -// { -// return CppTL::Enum::any( CppTL::Enum::transform( -// CppTL::Enum::keys( *(value_.map_), CppTL::Type() ), -// MemberNamesTransform() ) ); -// } -// return EnumMemberNames(); -//} -// -// -// EnumValues -// Value::enumValues() const -//{ -// if ( type_ == objectValue || type_ == arrayValue ) -// return CppTL::Enum::anyValues( *(value_.map_), -// CppTL::Type() ); -// return EnumValues(); -//} -// -//# endif - -bool Value::isNull() const { return type_ == nullValue; } - -bool Value::isBool() const { return type_ == booleanValue; } - -bool Value::isInt() const { return type_ == intValue; } - -bool Value::isUInt() const { return type_ == uintValue; } - -bool Value::isIntegral() const { return type_ == intValue || type_ == uintValue || type_ == booleanValue; } - -bool Value::isDouble() const { return type_ == realValue; } - -bool Value::isNumeric() const { return isIntegral() || isDouble(); } - -bool Value::isString() const { return type_ == stringValue; } - -bool Value::isArray() const { return type_ == nullValue || type_ == arrayValue; } - -bool Value::isObject() const { return type_ == nullValue || type_ == objectValue; } - -void Value::setComment(const char *comment, CommentPlacement placement) { - if (!comments_) comments_ = new CommentInfo[numberOfCommentPlacement]; - comments_[placement].setComment(comment); -} - -void Value::setComment(const std::string &comment, CommentPlacement placement) { - setComment(comment.c_str(), placement); -} - -bool Value::hasComment(CommentPlacement placement) const { - return comments_ != 0 && comments_[placement].comment_ != 0; -} - -std::string Value::getComment(CommentPlacement placement) const { - if (hasComment(placement)) return comments_[placement].comment_; - return ""; -} - -std::string 
Value::toStyledString() const { - StyledWriter writer; - return writer.write(*this); -} - -Value::const_iterator Value::begin() const { - switch (type_) { -#ifdef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - if (value_.array_) { - ValueInternalArray::IteratorState it; - value_.array_->makeBeginIterator(it); - return const_iterator(it); - } - break; - case objectValue: - if (value_.map_) { - ValueInternalMap::IteratorState it; - value_.map_->makeBeginIterator(it); - return const_iterator(it); - } - break; -#else - case arrayValue: - case objectValue: - if (value_.map_) return const_iterator(value_.map_->begin()); - break; -#endif - default: - break; - } - return const_iterator(); -} - -Value::const_iterator Value::end() const { - switch (type_) { -#ifdef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - if (value_.array_) { - ValueInternalArray::IteratorState it; - value_.array_->makeEndIterator(it); - return const_iterator(it); - } - break; - case objectValue: - if (value_.map_) { - ValueInternalMap::IteratorState it; - value_.map_->makeEndIterator(it); - return const_iterator(it); - } - break; -#else - case arrayValue: - case objectValue: - if (value_.map_) return const_iterator(value_.map_->end()); - break; -#endif - default: - break; - } - return const_iterator(); -} - -Value::iterator Value::begin() { - switch (type_) { -#ifdef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - if (value_.array_) { - ValueInternalArray::IteratorState it; - value_.array_->makeBeginIterator(it); - return iterator(it); - } - break; - case objectValue: - if (value_.map_) { - ValueInternalMap::IteratorState it; - value_.map_->makeBeginIterator(it); - return iterator(it); - } - break; -#else - case arrayValue: - case objectValue: - if (value_.map_) return iterator(value_.map_->begin()); - break; -#endif - default: - break; - } - return iterator(); -} - -Value::iterator Value::end() { - switch (type_) { -#ifdef JSON_VALUE_USE_INTERNAL_MAP - case arrayValue: - if (value_.array_) { - 
ValueInternalArray::IteratorState it; - value_.array_->makeEndIterator(it); - return iterator(it); - } - break; - case objectValue: - if (value_.map_) { - ValueInternalMap::IteratorState it; - value_.map_->makeEndIterator(it); - return iterator(it); - } - break; -#else - case arrayValue: - case objectValue: - if (value_.map_) return iterator(value_.map_->end()); - break; -#endif - default: - break; - } - return iterator(); -} - -// class PathArgument -// ////////////////////////////////////////////////////////////////// - -PathArgument::PathArgument() : kind_(kindNone) {} - -PathArgument::PathArgument(ArrayIndex index) : index_(index), kind_(kindIndex) {} - -PathArgument::PathArgument(const char *key) : key_(key), kind_(kindKey) {} - -PathArgument::PathArgument(const std::string &key) : key_(key.c_str()), kind_(kindKey) {} - -// class Path -// ////////////////////////////////////////////////////////////////// - -Path::Path(const std::string &path, const PathArgument &a1, const PathArgument &a2, const PathArgument &a3, - const PathArgument &a4, const PathArgument &a5) { - InArgs in; - in.push_back(&a1); - in.push_back(&a2); - in.push_back(&a3); - in.push_back(&a4); - in.push_back(&a5); - makePath(path, in); -} - -void Path::makePath(const std::string &path, const InArgs &in) { - const char *current = path.c_str(); - const char *end = current + path.length(); - InArgs::const_iterator itInArg = in.begin(); - while (current != end) { - if (*current == '[') { - ++current; - if (*current == '%') - addPathInArg(path, in, itInArg, PathArgument::kindIndex); - else { - ArrayIndex index = 0; - for (; current != end && *current >= '0' && *current <= '9'; ++current) - index = index * 10 + ArrayIndex(*current - '0'); - args_.push_back(index); - } - if (current == end || *current++ != ']') invalidPath(path, int(current - path.c_str())); - } else if (*current == '%') { - addPathInArg(path, in, itInArg, PathArgument::kindKey); - ++current; - } else if (*current == '.') { - 
++current; - } else { - const char *beginName = current; - while (current != end && !strchr("[.", *current)) ++current; - args_.push_back(std::string(beginName, current)); - } - } -} - -void Path::addPathInArg(const std::string &path, const InArgs &in, InArgs::const_iterator &itInArg, - PathArgument::Kind kind) { - if (itInArg == in.end()) { - // Error: missing argument %d - } else if ((*itInArg)->kind_ != kind) { - // Error: bad argument type - } else { - args_.push_back(**itInArg); - } -} - -void Path::invalidPath(const std::string &path, int location) { - // Error: invalid path. -} - -const Value &Path::resolve(const Value &root) const { - const Value *node = &root; - for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) { - const PathArgument &arg = *it; - if (arg.kind_ == PathArgument::kindIndex) { - if (!node->isArray() || node->isValidIndex(arg.index_)) { - // Error: unable to resolve path (array value expected at position... - } - node = &((*node)[arg.index_]); - } else if (arg.kind_ == PathArgument::kindKey) { - if (!node->isObject()) { - // Error: unable to resolve path (object value expected at position...) - } - node = &((*node)[arg.key_]); - if (node == &Value::null) { - // Error: unable to resolve path (object has no member named '' at - // position...) 
- } - } - } - return *node; -} - -Value Path::resolve(const Value &root, const Value &defaultValue) const { - const Value *node = &root; - for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) { - const PathArgument &arg = *it; - if (arg.kind_ == PathArgument::kindIndex) { - if (!node->isArray() || node->isValidIndex(arg.index_)) return defaultValue; - node = &((*node)[arg.index_]); - } else if (arg.kind_ == PathArgument::kindKey) { - if (!node->isObject()) return defaultValue; - node = &((*node)[arg.key_]); - if (node == &Value::null) return defaultValue; - } - } - return *node; -} - -Value &Path::make(Value &root) const { - Value *node = &root; - for (Args::const_iterator it = args_.begin(); it != args_.end(); ++it) { - const PathArgument &arg = *it; - if (arg.kind_ == PathArgument::kindIndex) { - if (!node->isArray()) { - // Error: node is not an array at position ... - } - node = &((*node)[arg.index_]); - } else if (arg.kind_ == PathArgument::kindKey) { - if (!node->isObject()) { - // Error: node is not an object at position... - } - node = &((*node)[arg.key_]); - } - } - return *node; -} - -} // namespace Json - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_value.cpp -// ////////////////////////////////////////////////////////////////////// - -// ////////////////////////////////////////////////////////////////////// -// Beginning of content of file: src/lib_json/json_writer.cpp -// ////////////////////////////////////////////////////////////////////// - -// Copyright 2007-2010 Baptiste Lepilleur -// Distributed under MIT license, or public domain if desired and -// recognized in your jurisdiction. 
-// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE - -#if !defined(JSON_IS_AMALGAMATION) -#include -#include "json_tool.h" -#endif // if !defined(JSON_IS_AMALGAMATION) -#include -#include -#include -#include -#include -#include -#include - -#if _MSC_VER >= 1400 // VC++ 8.0 -#pragma warning(disable : 4996) // disable warning about strdup being deprecated. -#endif - -namespace Json { - -static bool containsControlCharacter(const char *str) { - while (*str) { - if (isControlCharacter(*(str++))) return true; - } - return false; -} - -std::string valueToString(LargestInt value) { - UIntToStringBuffer buffer; - char *current = buffer + sizeof(buffer); - bool isNegative = value < 0; - if (isNegative) value = -value; - uintToString(LargestUInt(value), current); - if (isNegative) *--current = '-'; - assert(current >= buffer); - return current; -} - -std::string valueToString(LargestUInt value) { - UIntToStringBuffer buffer; - char *current = buffer + sizeof(buffer); - uintToString(value, current); - assert(current >= buffer); - return current; -} - -#if defined(JSON_HAS_INT64) - -std::string valueToString(Int value) { return valueToString(LargestInt(value)); } - -std::string valueToString(UInt value) { return valueToString(LargestUInt(value)); } - -#endif // # if defined(JSON_HAS_INT64) - -std::string valueToString(double value) { - char buffer[32]; -#if defined(_MSC_VER) && defined(__STDC_SECURE_LIB__) // Use secure version with visual studio 2005 - // to avoid warning. 
- sprintf_s(buffer, sizeof(buffer), "%#.16g", value); -#else - sprintf(buffer, "%#.16g", value); -#endif - char *ch = buffer + strlen(buffer) - 1; - if (*ch != '0') return buffer; // nothing to truncate, so save time - while (ch > buffer && *ch == '0') { - --ch; - } - char *last_nonzero = ch; - while (ch >= buffer) { - switch (*ch) { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - --ch; - continue; - case '.': - // Truncate zeroes to save bytes in output, but keep one. - *(last_nonzero + 2) = '\0'; - return buffer; - default: - return buffer; - } - } - return buffer; -} - -std::string valueToString(bool value) { return value ? "true" : "false"; } - -std::string valueToQuotedString(const char *value) { - // Not sure how to handle unicode... - if (strpbrk(value, "\"\\\b\f\n\r\t") == NULL && !containsControlCharacter(value)) - return std::string("\"") + value + "\""; - // We have to walk value and escape any special characters. - // Appending to std::string is not efficient, but this should be rare. - // (Note: forward slashes are *not* rare, but I am not escaping them.) - std::string::size_type maxsize = strlen(value) * 2 + 3; // allescaped+quotes+NULL - std::string result; - result.reserve(maxsize); // to avoid lots of mallocs - result += "\""; - for (const char *c = value; *c != 0; ++c) { - switch (*c) { - case '\"': - result += "\\\""; - break; - case '\\': - result += "\\\\"; - break; - case '\b': - result += "\\b"; - break; - case '\f': - result += "\\f"; - break; - case '\n': - result += "\\n"; - break; - case '\r': - result += "\\r"; - break; - case '\t': - result += "\\t"; - break; - // case '/': - // Even though \/ is considered a legal escape in JSON, a bare - // slash is also legal, so I see no reason to escape it. - // (I hope I am not misunderstanding something. 
- // blep notes: actually escaping \/ may be useful in javascript to avoid - // (*c); - result += oss.str(); - } else { - result += *c; - } - break; - } - } - result += "\""; - return result; -} - -// Class Writer -// ////////////////////////////////////////////////////////////////// -Writer::~Writer() {} - -// Class FastWriter -// ////////////////////////////////////////////////////////////////// - -FastWriter::FastWriter() : yamlCompatiblityEnabled_(false) {} - -void FastWriter::enableYAMLCompatibility() { yamlCompatiblityEnabled_ = true; } - -std::string FastWriter::write(const Value &root) { - document_ = ""; - writeValue(root); - return document_; -} - -void FastWriter::writeValue(const Value &value) { - switch (value.type()) { - case nullValue: - document_ += "null"; - break; - case intValue: - document_ += valueToString(value.asLargestInt()); - break; - case uintValue: - document_ += valueToString(value.asLargestUInt()); - break; - case realValue: - document_ += valueToString(value.asDouble()); - break; - case stringValue: - document_ += valueToQuotedString(value.asCString()); - break; - case booleanValue: - document_ += valueToString(value.asBool()); - break; - case arrayValue: { - document_ += "["; - int size = value.size(); - for (int index = 0; index < size; ++index) { - if (index > 0) document_ += ","; - writeValue(value[index]); - } - document_ += "]"; - } break; - case objectValue: { - Value::Members members(value.getMemberNames()); - document_ += "{"; - for (Value::Members::iterator it = members.begin(); it != members.end(); ++it) { - const std::string &name = *it; - if (it != members.begin()) document_ += ","; - document_ += valueToQuotedString(name.c_str()); - document_ += yamlCompatiblityEnabled_ ? 
": " : ":"; - writeValue(value[name]); - } - document_ += "}"; - } break; - } -} - -// Class StyledWriter -// ////////////////////////////////////////////////////////////////// - -StyledWriter::StyledWriter() : rightMargin_(74), indentSize_(3) {} - -std::string StyledWriter::write(const Value &root) { - document_ = ""; - addChildValues_ = false; - indentString_ = ""; - writeCommentBeforeValue(root); - writeValue(root); - writeCommentAfterValueOnSameLine(root); - document_ += "\n"; - return document_; -} - -void StyledWriter::writeValue(const Value &value) { - switch (value.type()) { - case nullValue: - pushValue("null"); - break; - case intValue: - pushValue(valueToString(value.asLargestInt())); - break; - case uintValue: - pushValue(valueToString(value.asLargestUInt())); - break; - case realValue: - pushValue(valueToString(value.asDouble())); - break; - case stringValue: - pushValue(valueToQuotedString(value.asCString())); - break; - case booleanValue: - pushValue(valueToString(value.asBool())); - break; - case arrayValue: - writeArrayValue(value); - break; - case objectValue: { - Value::Members members(value.getMemberNames()); - if (members.empty()) - pushValue("{}"); - else { - writeWithIndent("{"); - indent(); - Value::Members::iterator it = members.begin(); - for (;;) { - const std::string &name = *it; - const Value &childValue = value[name]; - writeCommentBeforeValue(childValue); - writeWithIndent(valueToQuotedString(name.c_str())); - document_ += " : "; - writeValue(childValue); - if (++it == members.end()) { - writeCommentAfterValueOnSameLine(childValue); - break; - } - document_ += ","; - writeCommentAfterValueOnSameLine(childValue); - } - unindent(); - writeWithIndent("}"); - } - } break; - } -} - -void StyledWriter::writeArrayValue(const Value &value) { - unsigned size = value.size(); - if (size == 0) - pushValue("[]"); - else { - bool isArrayMultiLine = isMultineArray(value); - if (isArrayMultiLine) { - writeWithIndent("["); - indent(); - bool 
hasChildValue = !childValues_.empty(); - unsigned index = 0; - for (;;) { - const Value &childValue = value[index]; - writeCommentBeforeValue(childValue); - if (hasChildValue) - writeWithIndent(childValues_[index]); - else { - writeIndent(); - writeValue(childValue); - } - if (++index == size) { - writeCommentAfterValueOnSameLine(childValue); - break; - } - document_ += ","; - writeCommentAfterValueOnSameLine(childValue); - } - unindent(); - writeWithIndent("]"); - } else // output on a single line - { - assert(childValues_.size() == size); - document_ += "[ "; - for (unsigned index = 0; index < size; ++index) { - if (index > 0) document_ += ", "; - document_ += childValues_[index]; - } - document_ += " ]"; - } - } -} - -bool StyledWriter::isMultineArray(const Value &value) { - int size = value.size(); - bool isMultiLine = size * 3 >= rightMargin_; - childValues_.clear(); - for (int index = 0; index < size && !isMultiLine; ++index) { - const Value &childValue = value[index]; - isMultiLine = isMultiLine || ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); - } - if (!isMultiLine) // check if line length > max line length - { - childValues_.reserve(size); - addChildValues_ = true; - int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]' - for (int index = 0; index < size && !isMultiLine; ++index) { - writeValue(value[index]); - lineLength += int(childValues_[index].length()); - isMultiLine = isMultiLine && hasCommentForValue(value[index]); - } - addChildValues_ = false; - isMultiLine = isMultiLine || lineLength >= rightMargin_; - } - return isMultiLine; -} - -void StyledWriter::pushValue(const std::string &value) { - if (addChildValues_) - childValues_.push_back(value); - else - document_ += value; -} - -void StyledWriter::writeIndent() { - if (!document_.empty()) { - char last = document_[document_.length() - 1]; - if (last == ' ') // already indented - return; - if (last != '\n') // Comments may add new-line - document_ += '\n'; - } - 
document_ += indentString_; -} - -void StyledWriter::writeWithIndent(const std::string &value) { - writeIndent(); - document_ += value; -} - -void StyledWriter::indent() { indentString_ += std::string(indentSize_, ' '); } - -void StyledWriter::unindent() { - assert(int(indentString_.size()) >= indentSize_); - indentString_.resize(indentString_.size() - indentSize_); -} - -void StyledWriter::writeCommentBeforeValue(const Value &root) { - if (!root.hasComment(commentBefore)) return; - document_ += normalizeEOL(root.getComment(commentBefore)); - document_ += "\n"; -} - -void StyledWriter::writeCommentAfterValueOnSameLine(const Value &root) { - if (root.hasComment(commentAfterOnSameLine)) document_ += " " + normalizeEOL(root.getComment(commentAfterOnSameLine)); - - if (root.hasComment(commentAfter)) { - document_ += "\n"; - document_ += normalizeEOL(root.getComment(commentAfter)); - document_ += "\n"; - } -} - -bool StyledWriter::hasCommentForValue(const Value &value) { - return value.hasComment(commentBefore) || value.hasComment(commentAfterOnSameLine) || value.hasComment(commentAfter); -} - -std::string StyledWriter::normalizeEOL(const std::string &text) { - std::string normalized; - normalized.reserve(text.length()); - const char *begin = text.c_str(); - const char *end = begin + text.length(); - const char *current = begin; - while (current != end) { - char c = *current++; - if (c == '\r') // mac or dos EOL - { - if (*current == '\n') // convert dos EOL - ++current; - normalized += '\n'; - } else // handle unix EOL & other char - normalized += c; - } - return normalized; -} - -// Class StyledStreamWriter -// ////////////////////////////////////////////////////////////////// - -StyledStreamWriter::StyledStreamWriter(std::string indentation) - : document_(NULL), rightMargin_(74), indentation_(indentation) {} - -void StyledStreamWriter::write(std::ostream &out, const Value &root) { - document_ = &out; - addChildValues_ = false; - indentString_ = ""; - 
writeCommentBeforeValue(root); - writeValue(root); - writeCommentAfterValueOnSameLine(root); - *document_ << "\n"; - document_ = NULL; // Forget the stream, for safety. -} - -void StyledStreamWriter::writeValue(const Value &value) { - switch (value.type()) { - case nullValue: - pushValue("null"); - break; - case intValue: - pushValue(valueToString(value.asLargestInt())); - break; - case uintValue: - pushValue(valueToString(value.asLargestUInt())); - break; - case realValue: - pushValue(valueToString(value.asDouble())); - break; - case stringValue: - pushValue(valueToQuotedString(value.asCString())); - break; - case booleanValue: - pushValue(valueToString(value.asBool())); - break; - case arrayValue: - writeArrayValue(value); - break; - case objectValue: { - Value::Members members(value.getMemberNames()); - if (members.empty()) - pushValue("{}"); - else { - writeWithIndent("{"); - indent(); - Value::Members::iterator it = members.begin(); - for (;;) { - const std::string &name = *it; - const Value &childValue = value[name]; - writeCommentBeforeValue(childValue); - writeWithIndent(valueToQuotedString(name.c_str())); - *document_ << " : "; - writeValue(childValue); - if (++it == members.end()) { - writeCommentAfterValueOnSameLine(childValue); - break; - } - *document_ << ","; - writeCommentAfterValueOnSameLine(childValue); - } - unindent(); - writeWithIndent("}"); - } - } break; - } -} - -void StyledStreamWriter::writeArrayValue(const Value &value) { - unsigned size = value.size(); - if (size == 0) - pushValue("[]"); - else { - bool isArrayMultiLine = isMultineArray(value); - if (isArrayMultiLine) { - writeWithIndent("["); - indent(); - bool hasChildValue = !childValues_.empty(); - unsigned index = 0; - for (;;) { - const Value &childValue = value[index]; - writeCommentBeforeValue(childValue); - if (hasChildValue) - writeWithIndent(childValues_[index]); - else { - writeIndent(); - writeValue(childValue); - } - if (++index == size) { - 
writeCommentAfterValueOnSameLine(childValue); - break; - } - *document_ << ","; - writeCommentAfterValueOnSameLine(childValue); - } - unindent(); - writeWithIndent("]"); - } else // output on a single line - { - assert(childValues_.size() == size); - *document_ << "[ "; - for (unsigned index = 0; index < size; ++index) { - if (index > 0) *document_ << ", "; - *document_ << childValues_[index]; - } - *document_ << " ]"; - } - } -} - -bool StyledStreamWriter::isMultineArray(const Value &value) { - int size = value.size(); - bool isMultiLine = size * 3 >= rightMargin_; - childValues_.clear(); - for (int index = 0; index < size && !isMultiLine; ++index) { - const Value &childValue = value[index]; - isMultiLine = isMultiLine || ((childValue.isArray() || childValue.isObject()) && childValue.size() > 0); - } - if (!isMultiLine) // check if line length > max line length - { - childValues_.reserve(size); - addChildValues_ = true; - int lineLength = 4 + (size - 1) * 2; // '[ ' + ', '*n + ' ]' - for (int index = 0; index < size && !isMultiLine; ++index) { - writeValue(value[index]); - lineLength += int(childValues_[index].length()); - isMultiLine = isMultiLine && hasCommentForValue(value[index]); - } - addChildValues_ = false; - isMultiLine = isMultiLine || lineLength >= rightMargin_; - } - return isMultiLine; -} - -void StyledStreamWriter::pushValue(const std::string &value) { - if (addChildValues_) - childValues_.push_back(value); - else - *document_ << value; -} - -void StyledStreamWriter::writeIndent() { - /* - Some comments in this method would have been nice. 
;-) - - if ( !document_.empty() ) - { - char last = document_[document_.length()-1]; - if ( last == ' ' ) // already indented - return; - if ( last != '\n' ) // Comments may add new-line - *document_ << '\n'; - } - */ - *document_ << '\n' << indentString_; -} - -void StyledStreamWriter::writeWithIndent(const std::string &value) { - writeIndent(); - *document_ << value; -} - -void StyledStreamWriter::indent() { indentString_ += indentation_; } - -void StyledStreamWriter::unindent() { - assert(indentString_.size() >= indentation_.size()); - indentString_.resize(indentString_.size() - indentation_.size()); -} - -void StyledStreamWriter::writeCommentBeforeValue(const Value &root) { - if (!root.hasComment(commentBefore)) return; - *document_ << normalizeEOL(root.getComment(commentBefore)); - *document_ << "\n"; -} - -void StyledStreamWriter::writeCommentAfterValueOnSameLine(const Value &root) { - if (root.hasComment(commentAfterOnSameLine)) - *document_ << " " + normalizeEOL(root.getComment(commentAfterOnSameLine)); - - if (root.hasComment(commentAfter)) { - *document_ << "\n"; - *document_ << normalizeEOL(root.getComment(commentAfter)); - *document_ << "\n"; - } -} - -bool StyledStreamWriter::hasCommentForValue(const Value &value) { - return value.hasComment(commentBefore) || value.hasComment(commentAfterOnSameLine) || value.hasComment(commentAfter); -} - -std::string StyledStreamWriter::normalizeEOL(const std::string &text) { - std::string normalized; - normalized.reserve(text.length()); - const char *begin = text.c_str(); - const char *end = begin + text.length(); - const char *current = begin; - while (current != end) { - char c = *current++; - if (c == '\r') // mac or dos EOL - { - if (*current == '\n') // convert dos EOL - ++current; - normalized += '\n'; - } else // handle unix EOL & other char - normalized += c; - } - return normalized; -} - -std::ostream &operator<<(std::ostream &sout, const Value &root) { - Json::StyledStreamWriter writer; - writer.write(sout, 
root); - return sout; -} - -} // namespace Json - -// ////////////////////////////////////////////////////////////////////// -// End of content of file: src/lib_json/json_writer.cpp -// ////////////////////////////////////////////////////////////////////// diff --git a/third_party/junit/ctest2junit.xsl b/third_party/junit/ctest2junit.xsl new file mode 100644 index 0000000000..e7de63690f --- /dev/null +++ b/third_party/junit/ctest2junit.xsl @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + BuildName: + BuildStamp: + Name: + Generator: + CompilerName: + OSName: + Hostname: + OSRelease: + OSVersion: + OSPlatform: + Is64Bits: + VendorString: + VendorID: + FamilyID: + ModelID: + ProcessorCacheSize: + NumberOfLogicalCPU: + NumberOfPhysicalCPU: + TotalVirtualMemory: + TotalPhysicalMemory: + LogicalProcessorsPerPhysical: + ProcessorClockFrequency: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/thirdparty.spdx b/thirdparty.spdx index 2d70f8e944..883fa64032 100644 --- a/thirdparty.spdx +++ b/thirdparty.spdx @@ -1,11 +1,9 @@ -DocumentNamespace: https://github.com/advancedtelematic/aktualizr/ +DocumentNamespace: https://github.com/uptane/aktualizr/ DocumentName: SPDX-third_party SPDXID: SPDXRef-DOCUMENT Creator: Organization: HERE Technologies Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-googletest Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-jsoncpp -Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-HdrHistogram_c -Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-boost-program-options-accumulator Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-boost-filesystem Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-boost-program-options Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-boost-log @@ -17,13 +15,13 @@ Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-libostree Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-sqlite3 
Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-asn1c Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-libp11 -Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-systemd +Relationship: SPDXRef-DOCUMENT DESCRIBES SPDXRef-ctest2junit.xsl PackageName: googletest SPDXID: SPDXRef-googletest -PackageVersion: 1.8.0 -PackageDownloadLocation: https://github.com/google/googletest/archive/release-1.8.0.zip +PackageVersion: 1.8.1 +PackageDownloadLocation: https://github.com/google/googletest/archive/release-1.8.1.zip PackageHomePage: https://github.com/google/googletest PackageLicenseConcluded: BSD-3-Clause PackageLicenseDeclared: BSD-3-Clause @@ -36,8 +34,8 @@ PackageComment: Testing only. PackageName: jsoncpp SPDXID: SPDXRef-jsoncpp -PackageVersion: 1.6.0 -PackageDownloadLocation: https://github.com/open-source-parsers/jsoncpp/archive/1.6.0.zip +PackageVersion: 1.8.4 +PackageDownloadLocation: https://github.com/open-source-parsers/jsoncpp/archive/1.8.4.zip PackageHomePage: https://github.com/open-source-parsers/jsoncpp PackageLicenseConcluded: (MIT OR LicenseRef-jsoncpp-public-domain) PackageLicenseDeclared: (MIT OR LicenseRef-jsoncpp-public-domain) @@ -47,63 +45,43 @@ PackageCopyrightText: Copyright (c) 2007-2010 by Baptiste Lepilleur and Th FilesAnalyzed: false -PackageName: HdrHistogram_c -SPDXID: SPDXRef-HdrHistogram_c -PackageDownloadLocation: https://github.com/HdrHistogram/HdrHistogram_c/archive/0.9.7.tar.gz -PackageHomePage: https://github.com/HdrHistogram/HdrHistogram_c -PackageLicenseConcluded: BSD-2-Clause -PackageLicenseDeclared: BSD-2-Clause -PackageLicenseInfoFromFiles: BSD-2-Clause -PackageCopyrightText: Copyright (c) 2012, 2013, 2014 Gil Tene -Copyright (c) 2014 Michael Barker -Copyright (c) 2014 Matt Warren -All rights reserved. -FilesAnalyzed: false -PackageComment: Testing only. 
- - -PackageName: boost-program-options-accumulator -SPDXID: SPDXRef-boost-program-options-accumulator -PackageDownloadLocation: https://github.com/bskari/sqlassie/blob/master/src/accumulator.hpp -PackageHomePage: http://benjaminwolsey.de/node/123 -PackageLicenseConcluded: BSL-1.0 -PackageLicenseDeclared: BSL-1.0 -PackageLicenseInfoFromFiles: BSL-1.0 -PackageCopyrightText: (C) Copyright benjaminwolsey.de 2010-2011. -FilesAnalyzed: false - - PackageName: boost-filesystem SPDXID: SPDXRef-boost-filesystem -PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.65.1/source/boost_1_65_1.tar.bz2 +PackageVersion: 1.58.0 +PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.58.0/source/boost_1_58_0.tar.bz2 PackageHomePage: http://www.boost.org/ PackageLicenseConcluded: BSL-1.0 PackageLicenseDeclared: BSL-1.0 PackageLicenseInfoFromFiles: BSL-1.0 PackageCopyrightText: © Copyright Beman Dawes, 2002-2005 FilesAnalyzed: false +PackageComment: Dynamically linked. PackageName: boost-program-options SPDXID: SPDXRef-boost-program-options -PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.65.1/source/boost_1_65_1.tar.bz2 +PackageVersion: 1.58.0 +PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.58.0/source/boost_1_58_0.tar.bz2 PackageHomePage: http://www.boost.org/ PackageLicenseConcluded: BSL-1.0 PackageLicenseDeclared: BSL-1.0 PackageLicenseInfoFromFiles: BSL-1.0 PackageCopyrightText: Copyright © 2002-2004 Vladimir Prus FilesAnalyzed: false +PackageComment: Dynamically linked. 
PackageName: boost-log SPDXID: SPDXRef-boost-log -PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.65.1/source/boost_1_65_1.tar.bz2 +PackageVersion: 1.58.0 +PackageDownloadLocation: https://dl.bintray.com/boostorg/release/1.58.0/source/boost_1_58_0.tar.bz2 PackageHomePage: http://www.boost.org/ PackageLicenseConcluded: BSL-1.0 PackageLicenseDeclared: BSL-1.0 PackageLicenseInfoFromFiles: BSL-1.0 PackageCopyrightText: Copyright © 2007-2016 Andrey Semashev FilesAnalyzed: false +PackageComment: Dynamically linked. PackageName: libcurl @@ -208,17 +186,16 @@ FilesAnalyzed: false PackageComment: Dynamically linked. -PackageName: systemd -SPDXID: SPDXRef-systemd -PackageDownloadLocation: https://github.com/systemd/systemd/archive/v238.tar.gz -PackageHomePage: http://www.freedesktop.org/wiki/Software/systemd -PackageLicenseConcluded: (GPL-2.0-only AND LGPL-2.1-only) -PackageLicenseDeclared: (GPL-2.0-only AND LGPL-2.1-only) -PackageLicenseInfoFromFiles: GPL-2.0-only -PackageLicenseInfoFromFiles: LGPL-2.1-only +PackageName: ctest2junit.xsl +SPDXID: SPDXRef-ctest2junit.xsl +PackageDownloadLocation: https://github.com/manticoresoftware/manticoresearch/tree/master/misc/junit/ctest2junit.xsl +PackageHomePage: https://github.com/manticoresoftware/manticoresearch +PackageLicenseConcluded: GPLv2 +PackageLicenseDeclared: GPLv2 +PackageLicenseInfoFromFiles: GPLv2 PackageCopyrightText: NONE FilesAnalyzed: false -PackageComment: Dynamically linked. +PackageComment: Testing only. LicenseID: LicenseRef-jsoncpp-public-domain