From c53bdc8d5189bb6757979a783928f197a25efee9 Mon Sep 17 00:00:00 2001 From: thxCode Date: Thu, 4 Jan 2024 10:35:41 +0800 Subject: [PATCH] feat: first commit Signed-off-by: thxCode --- .commitsar.yml | 5 + .gitattributes | 4 + .github/workflows/ci.yml | 122 +++++++++++++ .github/workflows/clean.yml | 82 +++++++++ .gitignore | 32 ++++ .golangci.yaml | 207 ++++++++++++++++++++++ LICENSE | 191 +++++++++++++++++++++ Makefile | 48 ++++++ README.md | 199 ++++++++++++++++++++++ cmd/apis/api.go | 22 +++ cmd/apis/api_serve.go | 37 ++++ cmd/plugins/plugin.go | 26 +++ cmd/plugins/plugin_aws.go | 37 ++++ cmd/plugins/plugin_azure.go | 37 ++++ cmd/plugins/plugin_gcp.go | 37 ++++ go.mod | 51 ++++++ go.sum | 148 ++++++++++++++++ hack/build.sh | 62 +++++++ hack/ci.sh | 22 +++ hack/deps.sh | 32 ++++ hack/lib/init.sh | 24 +++ hack/lib/log.sh | 161 ++++++++++++++++++ hack/lib/style.sh | 321 +++++++++++++++++++++++++++++++++++ hack/lib/target.sh | 64 +++++++ hack/lib/util.sh | 179 +++++++++++++++++++ hack/lib/version.sh | 81 +++++++++ hack/lint.sh | 56 ++++++ hack/release.sh | 33 ++++ hack/test.sh | 65 +++++++ main.go | 174 +++++++++++++++++++ pkg/apis/client.go | 18 ++ pkg/apis/router.go | 17 ++ pkg/apis/server/server.go | 139 +++++++++++++++ pkg/bytespool/pool.go | 61 +++++++ pkg/cache/cache.go | 30 ++++ pkg/cache/file.go | 291 +++++++++++++++++++++++++++++++ pkg/cache/memory.go | 275 ++++++++++++++++++++++++++++++ pkg/cache/singlefilght.go | 56 ++++++ pkg/consts/const.go | 26 +++ pkg/json/jsoniter.go | 101 +++++++++++ pkg/json/std.go | 69 ++++++++ pkg/plugins/aws/client.go | 142 ++++++++++++++++ pkg/plugins/aws/server.go | 108 ++++++++++++ pkg/plugins/aws/token.go | 208 +++++++++++++++++++++++ pkg/plugins/azure/client.go | 139 +++++++++++++++ pkg/plugins/azure/server.go | 104 ++++++++++++ pkg/plugins/azure/token.go | 173 +++++++++++++++++++ pkg/plugins/gcp/client.go | 139 +++++++++++++++ pkg/plugins/gcp/server.go | 104 ++++++++++++ pkg/plugins/gcp/token.go | 160 +++++++++++++++++ 
pkg/signal/context.go | 42 +++++ pkg/signal/signal_posix.go | 13 ++ pkg/signal/signal_windows.go | 9 + pkg/token/token.go | 91 ++++++++++ pkg/version/version.go | 75 ++++++++ 55 files changed, 5149 insertions(+) create mode 100644 .commitsar.yml create mode 100644 .gitattributes create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/clean.yml create mode 100644 .gitignore create mode 100644 .golangci.yaml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/apis/api.go create mode 100644 cmd/apis/api_serve.go create mode 100644 cmd/plugins/plugin.go create mode 100644 cmd/plugins/plugin_aws.go create mode 100644 cmd/plugins/plugin_azure.go create mode 100644 cmd/plugins/plugin_gcp.go create mode 100644 go.mod create mode 100644 go.sum create mode 100755 hack/build.sh create mode 100755 hack/ci.sh create mode 100755 hack/deps.sh create mode 100644 hack/lib/init.sh create mode 100644 hack/lib/log.sh create mode 100644 hack/lib/style.sh create mode 100644 hack/lib/target.sh create mode 100644 hack/lib/util.sh create mode 100644 hack/lib/version.sh create mode 100755 hack/lint.sh create mode 100755 hack/release.sh create mode 100755 hack/test.sh create mode 100644 main.go create mode 100644 pkg/apis/client.go create mode 100644 pkg/apis/router.go create mode 100644 pkg/apis/server/server.go create mode 100644 pkg/bytespool/pool.go create mode 100644 pkg/cache/cache.go create mode 100644 pkg/cache/file.go create mode 100644 pkg/cache/memory.go create mode 100644 pkg/cache/singlefilght.go create mode 100644 pkg/consts/const.go create mode 100644 pkg/json/jsoniter.go create mode 100644 pkg/json/std.go create mode 100644 pkg/plugins/aws/client.go create mode 100644 pkg/plugins/aws/server.go create mode 100644 pkg/plugins/aws/token.go create mode 100644 pkg/plugins/azure/client.go create mode 100644 pkg/plugins/azure/server.go create mode 100644 pkg/plugins/azure/token.go create mode 100644 
pkg/plugins/gcp/client.go create mode 100644 pkg/plugins/gcp/server.go create mode 100644 pkg/plugins/gcp/token.go create mode 100644 pkg/signal/context.go create mode 100644 pkg/signal/signal_posix.go create mode 100644 pkg/signal/signal_windows.go create mode 100644 pkg/token/token.go create mode 100644 pkg/version/version.go diff --git a/.commitsar.yml b/.commitsar.yml new file mode 100644 index 0000000..c97c73b --- /dev/null +++ b/.commitsar.yml @@ -0,0 +1,5 @@ +commits: + disabled: false + strict: false + limit: 100 + all: false diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..15f3964 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +* text=auto eol=lf + +**/*.pb.go linguist-generated=true +staging/**/go.sum linguist-generated=true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..cfd77ad --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,122 @@ +name: ci + +permissions: + contents: read + pull-requests: read + actions: read + +env: + VERSION: "${{ github.ref_name }}" + BUILD_PLATFORMS: "linux/amd64,linux/arm64,darwin/amd64,darwin/arm64,windows/amd64" + PARALLELIZE: "false" + GO_VERSION: "1.21.4" + +defaults: + run: + shell: bash + +on: + workflow_dispatch: { } + push: + tags: + - "v*.*.*" + branches: + - "main" + paths-ignore: + - "docs/**" + - "**.md" + - "**.mdx" + - "**.png" + - "**.jpg" + pull_request: + branches: + - "main" + paths-ignore: + - "docs/**" + - "**.md" + - "**.mdx" + - "**.png" + - "**.jpg" + +jobs: + build: + timeout-minutes: 60 + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + # checkout the whole histories for commitsar, + # currently commitsar needs full git objects to work correctly. 
+ fetch-depth: 0 + persist-credentials: false + - name: Setup Go + timeout-minutes: 15 + uses: actions/setup-go@v5 + with: + go-version: "${{ env.GO_VERSION }}" + cache-dependency-path: | + **/go.sum + - name: Setup Toolbox + timeout-minutes: 5 + uses: actions/cache@v3 + with: + # restore/save service binaries, e.g. goimports, golangci-lint, commitsar. + key: toolbox-${{ runner.os }} + path: | + ${{ github.workspace }}/.sbin + - name: Build + run: make ci + env: + LINT_DIRTY: "true" + - name: Archive Publish Result + uses: actions/cache/save@v3 + with: + # save package resources, e.g. go build result, downloaded UI, entrypoint script. + key: archive-${{ runner.os }}-${{ github.sha }} + path: | + ${{ github.workspace }}/.dist/build + + release: + if: ${{ startsWith(github.ref, 'refs/tags/') }} + needs: + - build + permissions: + contents: write + actions: read + id-token: write + timeout-minutes: 20 + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + persist-credentials: false + - name: Unarchive Publish Result + timeout-minutes: 5 + uses: actions/cache/restore@v3 + with: + # restore package resources, e.g. go build result, downloaded UI, entrypoint script. 
+ key: archive-${{ runner.os }}-${{ github.sha }} + path: | + ${{ github.workspace }}/.dist/build + - name: Import GPG key + id: import_gpg + uses: crazy-max/ghaction-import-gpg@v5 + with: + gpg_private_key: ${{ secrets.CI_GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.CI_GPG_PASSPHRASE }} + - name: Sign Checksum + run: make release + env: + GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} + - name: Release + uses: softprops/action-gh-release@v1 + with: + token: ${{ github.token }} + fail_on_unmatched_files: true + tag_name: ${{ steps.metadata.outputs.version }} + prerelease: ${{ contains(github.ref, 'rc') }} + files: | + .dist/build/kubecia/* diff --git a/.github/workflows/clean.yml b/.github/workflows/clean.yml new file mode 100644 index 0000000..a9c350e --- /dev/null +++ b/.github/workflows/clean.yml @@ -0,0 +1,82 @@ +name: clean + +permissions: + contents: write + pull-requests: read + actions: write + +defaults: + run: + shell: bash + +on: + schedule: + - cron: '0 */12 * * *' + workflow_dispatch: { } + +jobs: + clean: + timeout-minutes: 5 + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 1 + persist-credentials: false + - name: Remove Cache + uses: actions/github-script@v7 + with: + # clean up caches, + # ref to https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries, + # and https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache. + script: | + const owner = context.repo.owner + const repo = context.repo.repo + var deleteCaches = new Array() + + // get candidate items. + const { data: cs } = await github.rest.actions.getActionsCacheList({ + owner: owner, + repo: repo, + }); + for (const c of cs.actions_caches) { + // clean closed pull request's caches. 
+ if (c.ref.match(/^refs\/pull\/.*$/)) { + var prNum = c.ref.replace(/[^\d]/g, "") + const { data: pr } = await github.rest.pulls.get({ + owner: owner, + repo: repo, + pull_number: prNum, + }) + if (pr.state === 'closed') { + deleteCaches.push(c) + } + continue + } + // do not clean toolbox caches. + if (c.key.match(/^toolbox-.*$/)) { + continue + } + // clean push archived caches. + if (c.key.match(/^archive-.*$/)) { + deleteCaches.push(c) + continue + } + // clean stale built caches. + if (!c.key.match(/^setup-go-.*-${{ hashFiles('**/go.sum') }}$/)) { + deleteCaches.push(c) + continue + } + } + + // delete + for (const c of deleteCaches) { + await github.rest.actions.deleteActionsCacheById({ + owner: owner, + repo: repo, + cache_id: c.id, + }) + console.log(`cleaned cache "${c.key}"`) + } + continue-on-error: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8501165 --- /dev/null +++ b/.gitignore @@ -0,0 +1,32 @@ +# Files +.DS_Store +*.lock +*.test +*.out +*.swp +*.swo +*.db +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.log +go.work +go.work.* + +# Dirs +.idea/ +.vscode/ +.kube/ +.terraform/ +.vagrant/ +.bundle/ +.cache/ +.docker/ +.entc/ +.sbin/ +.dist/ +log/ +certs/ +tmp/ diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000..01a503e --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,207 @@ +run: + timeout: 10m + tests: true + skip-files: + - "doc.go" + modules-download-mode: readonly + +# output configuration options +output: + # Format: colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions + # + # Multiple can be specified by separating them by comma, output can be provided + # for each of them by separating format name and path by colon symbol. + # Output path can be either `stdout`, `stderr` or path to the file to write to. 
+ # Example: "checkstyle:report.json,colored-line-number" + # + # Default: colored-line-number + format: colored-line-number + # Print lines of code with issue. + # Default: true + print-issued-lines: true + # Print linter name in the end of issue text. + # Default: true + print-linter-name: true + # Make issues output unique by line. + # Default: true + uniq-by-line: true + # Add a prefix to the output file references. + # Default is no prefix. + path-prefix: "" + # Sort results by: filepath, line and column. + sort-results: true + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - decorder + - durationcheck + - errcheck + - errname + - errorlint + - exportloopref + - godot + - goconst + - gofumpt + - gocritic + - gosimple + - gosec + - govet + - ineffassign + - lll + - makezero + - misspell + - misspell + - nakedret + - nilerr + - prealloc + - predeclared + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + - whitespace + + # enable complexity linters + # - dupl + # - gocognit + # - gocyclo + # - funlen + +linters-settings: + staticcheck: + checks: ["all", "-SA1019", "-SA2002", "-SA5008"] + stylecheck: + checks: ["all", "-ST1003"] + gosec: + severity: "low" + confidence: "low" + excludes: + - G101 + - G112 + revive: + rules: + - name: var-naming + disabled: true + arguments: + - ["HTTP", "ID", "TLS", "TCP", "UDP", "API", "CA", "URL", "DNS"] + godot: + # Comments to be checked: `declarations`, `toplevel`, or `all`. + # Default: declarations + scope: all + # List of regexps for excluding particular comment lines from check. + # Default: [] + exclude: + # Exclude todo and fixme comments. + - "^fixme:" + - "^todo:" + # Check that each sentence ends with a period. + # Default: true + period: true + # Check that each sentence starts with a capital letter. + # Default: false + capital: true + lll: + # max line length, lines longer will be reported. Default is 120. 
+ # '\t' is counted as 1 character by default, and can be changed with the tab-width option + line-length: 150 + # tab width in spaces. Default to 1. + tab-width: 1 + goconst: + # Minimal length of string constant. + # Default: 3 + min-len: 3 + # Minimum occurrences of constant string count to trigger issue. + # Default: 3 + min-occurrences: 3 + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. + locale: US + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + unused: + # Mark all struct fields that have been written to as used. + # Default: true + field-writes-are-uses: true + # Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write. + # Default: false + post-statements-are-reads: true + # Mark all exported identifiers as used. + # Default: true + exported-is-used: true + # Mark all exported fields as used. + # default: true + exported-fields-are-used: true + # Mark all function parameters as used. + # default: true + parameters-are-used: true + # Mark all local variables as used. + # default: true + local-variables-are-used: true + # Mark all identifiers inside generated files as used. + # Default: true + generated-is-used: true + errorlint: + # Check whether fmt.Errorf uses the %w verb for formatting errors. 
See the readme for caveats + errorf: true + # Check for plain type assertions and type switches + asserts: true + # Check for plain error comparisons + comparison: true + makezero: + always: false + gosimple: + go: "1.19" + checks: ["all"] + nakedret: + max-func-lines: 60 + usestdlibvars: + # Suggest the use of http.MethodXX + # Default: true + http-method: true + # Suggest the use of http.StatusXX + # Default: true + http-status-code: true + # Suggest the use of time.Weekday + # Default: true + time-weekday: true + # Suggest the use of time.Month + # Default: false + time-month: true + # Suggest the use of time.Layout + # Default: false + time-layout: true + # Suggest the use of crypto.Hash + # Default: false + crypto-hash: true + decorder: + dec-order: + - const + - var + - func + disable-init-func-first-check: false + disable-dec-order-check: true + +issues: + exclude-rules: + - path: _test\.go + linters: + - errcheck + - gosec + - rowserrcheck + - makezero + - lll + - funlen + - wsl diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..aedc377 --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2023 Seal, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..6eb4eae --- /dev/null +++ b/Makefile @@ -0,0 +1,48 @@ +SHELL := /bin/bash + +# Borrowed from https://stackoverflow.com/questions/18136918/how-to-get-current-relative-directory-of-your-makefile +curr_dir := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))) + +# Borrowed from https://stackoverflow.com/questions/2214575/passing-arguments-to-make-run +rest_args := $(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS)) +$(eval $(rest_args):;@:) + +targets := $(shell ls $(curr_dir)/hack | grep '.sh' | sed 's/\.sh//g') +$(targets): + @$(curr_dir)/hack/$@.sh $(rest_args) + +help: + # + # Usage: + # + # * [dev] `make deps`, get dependencies. + # + # * [dev] `make lint`, check style. + # - `BUILD_TAGS="jsoniter" make lint` check with specified tags. + # - `LINT_DIRTY=true make lint` verify whether the code tree is dirty. + # + # * [dev] `make test`, execute unit testing. + # - `BUILD_TAGS="jsoniter" make test` test with specified tags. + # + # * [dev] `make build`, execute cross building. + # - `VERSION=vX.y.z+l.m make build` build all targets with vX.y.z+l.m version. + # - `OS=linux ARCH=arm64 make build` build all targets run on linux/arm64 arch. + # - `BUILD_TAGS="jsoniter" make build` build with specified tags. + # - `BUILD_PLATFORMS="linux/amd64,linux/arm64" make build` do multiple platforms go build. + # + # * [dev] `make package`, embed running resources into a Docker image on one platform. + # - `REPO=xyz make package` package all targets named with xyz repository. + # - `VERSION=vX.y.z+l.m make package` package all targets named with vX.y.z-l.m tag. + # - `TAG=main make package` package all targets named with main tag. + # - `OS=linux ARCH=arm64 make package` package all targets run on linux/arm64 arch. + # - `PACKAGE_BUILD=false make package` prepare build resource but disable docker build. + # - `DOCKER_USERNAME=... DOCKER_PASSWORD=... 
PACKAGE_PUSH=true make package` execute docker push after build. + # + # * [ci] `make ci`, execute `make deps`, `make lint`, `make test`, `make build` and `make package`. + # - `CI_CHECK=false make ci` only execute `make build` and `make package`. + # - `CI_PUBLISH=false make ci` only execute `make deps`, `make lint` and `make test`. + # + @echo + +.DEFAULT_GOAL := build +.PHONY: $(targets) diff --git a/README.md b/README.md new file mode 100644 index 0000000..8d32b34 --- /dev/null +++ b/README.md @@ -0,0 +1,199 @@ +# Kubernetes Cloud Identify Authenticator(KubeCIA) + +> tl;dr: Available [client-go credential (exec) plugin](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins), no Cloud Provider CLI required. + +[![](https://goreportcard.com/badge/github.com/seal-io/kubecia)](https://goreportcard.com/report/github.com/seal-io/kubecia) +[![](https://img.shields.io/github/actions/workflow/status/seal-io/kubecia/ci.yml?label=ci)](https://github.com/seal-io/kubecia/actions) +[![](https://img.shields.io/github/v/tag/seal-io/kubecia?label=release)](https://github.com/seal-io/kubecia/releases) +[![](https://img.shields.io/github/downloads/seal-io/kubecia/total)](https://github.com/seal-io/kubecia/releases) +[![](https://img.shields.io/github/license/seal-io/kubecia?label=license)](https://github.com/seal-io/kubecia#license) + +## Background + +Since Kubernetes v1.22, we can use external credential plugins to authenticate with Kubernetes clusters. However, using +external credential plugins requires the Cloud Provider CLI to be installed, for some scenarios, such as CI/CD, it is +overkill and not friendly to automation task preparation. 
+ +```shell +$ docker images | grep cli + +gcr.io/google.com/cloudsdktool/google-cloud-cli latest 275a080b472c 20 hours ago 2.82GB +public.ecr.aws/aws-cli/aws-cli latest 09df25bd783c 2 days ago 415MB +mcr.microsoft.com/azure-cli latest 6cf11f9134f2 5 weeks ago 722MB +``` + +KubeCIA, which is a lightweight and easy-to-use credential plugin for Kubernetes, is born to reduce the dependency of +Cloud Provider CLI. + +## Usage + +KubeCIA can call the Cloud Provider API to get the credential and consume the local filesystem as caching. The following +example shows how to use KubeCIA to get credentials for EKS cluster. + +```yaml +apiVersion: v1 +kind: Config +users: + - name: eks-user + user: + exec: + # -- KubeCIA only supports `client.authentication.k8s.io/v1` API version. + apiVersion: "client.authentication.k8s.io/v1" + # -- API version `client.authentication.k8s.io/v1` needs configuring `interactiveMode`. + interactiveMode: Never + command: "kubecia" + args: + - "aws" + env: + # -- KubeCIA can retrieve the environment variables prefixed with `KUBECIA_`, + # -- second segment must be the upper case of the sub command. + - name: KUBECIA_AWS_ACCESS_KEY_ID + value: + - name: KUBECIA_AWS_SECRET_ACCESS_KEY + # -- For sensitive value, KubeCIA will try to expand from the environment variable. + value: "$AWS_SECRET_ACCESS_KEY" + - name: KUBECIA_AWS_REGION + value: + - name: KUBECIA_AWS_CLUSTER + value: + - name: KUBECIA_AWS_ASSUME_ROLE_ARN + value: +clusters: + - name: eks-cluster + cluster: + server: + certificate-authority: +contexts: + - name: eks-cluster + context: + cluster: eks-cluster + user: eks-user +current-context: eks-cluster +``` + +### Centralized Service Mode + +KubeCIA can be set up as a centralized service by `kubecia serve` command. + +```shell +$ kubecia serve --socket /var/run/kubecia.sock +``` + +Under this mode, the above configuration can also work. 
+ +When acting as a sidecar, main containers can +use any Unix socket tool to call centralized KubeCIA service, the following example shows how to +use [cURL(7.40.0+)](https://curl.se/libcurl/c/CURLOPT_UNIX_SOCKET_PATH.html) to get. + +```yaml +apiVersion: v1 +kind: Config +users: + - name: eks-user + user: + exec: + apiVersion: "client.authentication.k8s.io/v1" + command: "curl" + args: + - "--silent" + - "--output" + - "-" + - "--location" + - "--unix-socket" + # -- KubeCIA service will listen on this Unix socket at default, change it by `--socket` flag. + - "/var/run/kubecia.sock" + - "--user" + # -- The service principal credentials, e.g. the AWS access_key_id and secret_access_key, the Azure client_id and client_secret, + # -- are required to be provided via `Authentication` header. + - ":" + - "http:/./aws///" + interactiveMode: Never +clusters: + - name: eks-cluster + cluster: + server: + certificate-authority: +contexts: + - name: eks-cluster + context: + cluster: eks-cluster + user: eks-user +current-context: eks-cluster +``` + +But describing the sensitive credentials in the configuration file is not recommended, it is recommended to use the +shell injection as below. + +```yaml +apiVersion: v1 +kind: Config +users: + - name: eks-user + user: + exec: + apiVersion: "client.authentication.k8s.io/v1" + command: "/bin/bash" + args: + - "-c" + - "curl --silent --output - --location --unix-socket /var/run/kubecia.sock --user ${AWS_ACCESS_KEY_ID}:${AWS_SECRET_ACCESS_KEY} http:/./aws/${AWS_REGION}/${EKS_CLUSTER}/${EKS_ROLE_ARN}" + env: + ## + ## AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are optional present at here, + ## there can be provided by environment variables. 
+          ##
+          # - name: AWS_ACCESS_KEY_ID
+          #   value:
+          # - name: AWS_SECRET_ACCESS_KEY
+          #   value:
+          - name: AWS_REGION
+            value:
+          - name: EKS_CLUSTER
+            value:
+          - name: EKS_ROLE_ARN
+            value:
+        interactiveMode: Never
+clusters:
+  - name: eks-cluster
+    cluster:
+      server:
+      certificate-authority:
+contexts:
+  - name: eks-cluster
+    context:
+      cluster: eks-cluster
+      user: eks-user
+current-context: eks-cluster
+```
+
+## Notice
+
+KubeCIA only responds with `apiVersion: "client.authentication.k8s.io/v1"` results, please update the kubectl if not
+supported.
+
+KubeCIA focuses on obtaining the token for accessing the Kubernetes cluster based on the user's service principal
+credential, for other features or modes, please review the links below.
+
+- [kubernetes-sigs/aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator)
+- [Azure/kubelogin](https://github.com/Azure/kubelogin)
+- [Here's what to know about changes to kubectl authentication coming in GKE v1.26.](https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke)
+
+KubeCIA listens on a Unix socket, to expose the service to the network, please use a socket proxy, like
+[ncat](https://nmap.org/ncat/guide/index.html).
+
+```shell
+$ ncat --verbose --listen --keep-open --source-port 80 --sh-exec "ncat --unixsock /var/run/kubecia.sock"
+```
+
+# License
+
+Copyright (c) 2024 [Seal, Inc.](https://seal.io)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at [LICENSE](./LICENSE) file for details.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/cmd/apis/api.go b/cmd/apis/api.go new file mode 100644 index 0000000..8ea598a --- /dev/null +++ b/cmd/apis/api.go @@ -0,0 +1,22 @@ +package apis + +import "github.com/spf13/cobra" + +func AddCommands(c *cobra.Command) { + var ( + g = &cobra.Group{ + ID: "api", + Title: `API commands`, + } + cs = []*cobra.Command{ + NewServe(), + } + ) + + c.AddGroup(g) + + for i := range cs { + cs[i].GroupID = g.ID + c.AddCommand(cs[i]) + } +} diff --git a/cmd/apis/api_serve.go b/cmd/apis/api_serve.go new file mode 100644 index 0000000..42120bf --- /dev/null +++ b/cmd/apis/api_serve.go @@ -0,0 +1,37 @@ +package apis + +import ( + "github.com/spf13/cobra" + + "github.com/seal-io/kubecia/pkg/apis/server" + "github.com/seal-io/kubecia/pkg/plugins/aws" + "github.com/seal-io/kubecia/pkg/plugins/azure" + "github.com/seal-io/kubecia/pkg/plugins/gcp" +) + +func NewServe() *cobra.Command { + var srv server.Server + + c := &cobra.Command{ + Use: "serve", + Short: "Serve KubeCIA APIs.", + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + ss := server.ServeFuncs{ + aws.Serve, + azure.Serve, + gcp.Serve, + } + + for i := range ss { + srv.Register(ss[i]) + } + + return srv.Serve(c.Context()) + }, + } + + srv.AddFlags(c.Flags()) + + return c +} diff --git a/cmd/plugins/plugin.go b/cmd/plugins/plugin.go new file mode 100644 index 0000000..c9fa2bb --- /dev/null +++ b/cmd/plugins/plugin.go @@ -0,0 +1,26 @@ +package plugins + +import ( + "github.com/spf13/cobra" +) + +func AddCommands(c *cobra.Command) { + var ( + g = &cobra.Group{ + ID: "plugin", + Title: `Plugin commands`, + } + cs = []*cobra.Command{ + NewAWS(), + NewAzure(), + NewGCP(), + } + ) + + c.AddGroup(g) + + for i := range cs { + cs[i].GroupID = g.ID + c.AddCommand(cs[i]) + } +} diff --git a/cmd/plugins/plugin_aws.go b/cmd/plugins/plugin_aws.go new file mode 100644 index 0000000..d6081f3 --- /dev/null +++ b/cmd/plugins/plugin_aws.go @@ -0,0 +1,37 @@ +package plugins + +import ( + "fmt" + + 
"github.com/spf13/cobra" + + "github.com/seal-io/kubecia/pkg/plugins/aws" +) + +func NewAWS() *cobra.Command { + var cli aws.Client + + c := &cobra.Command{ + Use: "aws", + Short: "Get AWS token.", + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + tk, err := cli.GetToken(c.Context()) + if err != nil { + return err + } + + bs, err := tk.ToKubeClientExecCredentialJSON() + if err != nil { + return fmt.Errorf("error converting token to kube client exec credential json: %w", err) + } + + c.Print(string(bs)) + return nil + }, + } + + cli.AddFlags(c.Flags()) + + return c +} diff --git a/cmd/plugins/plugin_azure.go b/cmd/plugins/plugin_azure.go new file mode 100644 index 0000000..d51bf3c --- /dev/null +++ b/cmd/plugins/plugin_azure.go @@ -0,0 +1,37 @@ +package plugins + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/seal-io/kubecia/pkg/plugins/azure" +) + +func NewAzure() *cobra.Command { + var cli azure.Client + + c := &cobra.Command{ + Use: "azure", + Short: "Get Azure token.", + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + tk, err := cli.GetToken(c.Context()) + if err != nil { + return err + } + + bs, err := tk.ToKubeClientExecCredentialJSON() + if err != nil { + return fmt.Errorf("error converting token to kube client exec credential json: %w", err) + } + + c.Print(string(bs)) + return nil + }, + } + + cli.AddFlags(c.Flags()) + + return c +} diff --git a/cmd/plugins/plugin_gcp.go b/cmd/plugins/plugin_gcp.go new file mode 100644 index 0000000..0e4e9a5 --- /dev/null +++ b/cmd/plugins/plugin_gcp.go @@ -0,0 +1,37 @@ +package plugins + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/seal-io/kubecia/pkg/plugins/gcp" +) + +func NewGCP() *cobra.Command { + var cli gcp.Client + + c := &cobra.Command{ + Use: "gcp", + Short: "Get GCP token.", + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + tk, err := cli.GetToken(c.Context()) + if err != nil { + return err + } + 
+ bs, err := tk.ToKubeClientExecCredentialJSON() + if err != nil { + return fmt.Errorf("error converting token to kube client exec credential json: %w", err) + } + + c.Print(string(bs)) + return nil + }, + } + + cli.AddFlags(c.Flags()) + + return c +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..4b64256 --- /dev/null +++ b/go.mod @@ -0,0 +1,51 @@ +module github.com/seal-io/kubecia + +go 1.21 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/allegro/bigcache/v3 v3.1.0 + github.com/aws/aws-sdk-go v1.49.16 + github.com/dustin/go-humanize v1.0.1 + github.com/json-iterator/go v1.1.12 + github.com/spf13/afero v1.11.0 + github.com/spf13/cobra v1.8.0 + github.com/spf13/pflag v1.0.5 + golang.org/x/mod v0.14.0 + golang.org/x/oauth2 v0.15.0 + golang.org/x/sync v0.6.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 + k8s.io/klog/v2 v2.110.1 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e +) + +require ( + cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/net v0.19.0 // 
indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..9b531c8 --- /dev/null +++ b/go.sum @@ -0,0 +1,148 @@ +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk= +github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= +github.com/aws/aws-sdk-go v1.49.16 
h1:KAQwhLg296hfffRdh+itA9p7Nx/3cXS/qOa3uF9ssig= +github.com/aws/aws-sdk-go v1.49.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp 
v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 
h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/build.sh b/hack/build.sh new file mode 100755 index 0000000..7772d43 --- /dev/null +++ b/hack/build.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +source "${ROOT_DIR}/hack/lib/init.sh" + +BUILD_DIR="${ROOT_DIR}/.dist/build" +mkdir -p "${BUILD_DIR}" + +function build() { + local target="$1" + local path="$2" + + seal::log::debug "building ${target}" + + local ldflags=( + "-X github.com/seal-io/kubecia/pkg/version.Version=${GIT_VERSION}" + "-X github.com/seal-io/kubecia/pkg/version.GitCommit=${GIT_COMMIT}" + "-w -s" + "-extldflags '-static'" + ) + + local tags=() + # shellcheck disable=SC2086 + IFS=" " read -r -a tags <<<"$(seal::target::build_tags ${target})" + + local platforms=() + # shellcheck disable=SC2086 + IFS=" " read -r -a platforms <<<"$(seal::target::build_platforms ${target})" + + for platform in "${platforms[@]}"; do + local os_arch + IFS="/" read -r -a os_arch <<<"${platform}" + local os="${os_arch[0]}" + local arch="${os_arch[1]}" + + local suffix="" + if [[ "${os}" == "windows" ]]; then + suffix=".exe" + fi + + GOOS=${os} GOARCH=${arch} CGO_ENABLED=0 go build \ + -trimpath \ + -ldflags="${ldflags[*]}" \ + -tags="${os} ${tags[*]}" \ + -o="${BUILD_DIR}/${target}/${target}-${os}-${arch}${suffix}" \ + "${path}" + done +} + +# +# main +# + +seal::log::info "+++ BUILD +++" "info: ${GIT_VERSION},${GIT_COMMIT:0:7},${GIT_TREE_STATE},${BUILD_DATE}" + +build "kubecia" "${ROOT_DIR}" "$@" + +seal::log::info "--- BUILD ---" diff --git a/hack/ci.sh b/hack/ci.sh new file mode 100755 index 0000000..8987467 --- /dev/null +++ b/hack/ci.sh @@ -0,0 
+1,22 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +pushd "${ROOT_DIR}" >/dev/null 2>&1 + +# check phase +if [[ "${CI_CHECK:-true}" == "true" ]]; then + make deps "$@" + make lint "$@" + make test "$@" +fi + +# publish phase +if [[ "${CI_PUBLISH:-true}" == "true" ]]; then + make build "$@" +fi + +popd >/dev/null 2>&1 diff --git a/hack/deps.sh b/hack/deps.sh new file mode 100755 index 0000000..c554573 --- /dev/null +++ b/hack/deps.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +source "${ROOT_DIR}/hack/lib/init.sh" + +function mod() { + local target="$1" + local path="$2" + + seal::log::debug "modding ${target}" + + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + + go mod tidy + go mod download + + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 +} + +# +# main +# + +seal::log::info "+++ MOD +++" + +mod "kubecia" "${ROOT_DIR}" "$@" + +seal::log::info "--- MOD ---" diff --git a/hack/lib/init.sh b/hack/lib/init.sh new file mode 100644 index 0000000..7a0eba8 --- /dev/null +++ b/hack/lib/init.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +unset CDPATH + +# Set no_proxy for localhost if behind a proxy, otherwise, +# the connections to localhost in scripts will time out. +export no_proxy=127.0.0.1,localhost + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd -P)" +mkdir -p "${ROOT_DIR}/.sbin" + +for file in "${ROOT_DIR}/hack/lib/"*; do + if [[ -f "${file}" ]] && [[ "${file}" != *"init.sh" ]]; then + # shellcheck disable=SC1090 + source "${file}" + fi +done + +seal::log::install_errexit +seal::version::get_version_vars diff --git a/hack/lib/log.sh b/hack/lib/log.sh new file mode 100644 index 0000000..989f8fb --- /dev/null +++ b/hack/lib/log.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash + +## +# Borrowed from github.com/kubernetes/kubernetes/hack/lib/logging.sh +## + +# ----------------------------------------------------------------------------- +# Logger variables helpers. These functions need the +# following variables: +# +# LOG_LEVEL - The level of logger, default is "debug". + +log_level="${LOG_LEVEL:-"debug"}" +log_colorful="${LOG_COLORFUL:-"true"}" + +# Handler for when we exit automatically on an error. +seal::log::errexit() { + local err="${PIPESTATUS[*]}" + + # if the shell we are in doesn't have errexit set (common in subshells) then + # don't dump stacks. + set +o | grep -qe "-o errexit" || return + + set +o xtrace + seal::log::panic "${BASH_SOURCE[1]}:${BASH_LINENO[0]} '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" +} + +seal::log::install_errexit() { + # trap ERR to provide an error handler whenever a command exits nonzero, this + # is a more verbose version of set -o errexit + trap 'seal::log::errexit' ERR + + # setting errtrace allows our ERR trap handler to be propagated to functions, + # expansions and subshells + set -o errtrace +} + +# Debug level logging. +seal::log::debug() { + [[ ${log_level} == "debug" ]] || return 0 + local message="${2:-}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + echo -e "[DEBG] ${timestamp} ${1-}" >&2 + shift 1 + for message; do + echo -e " ${message}" >&2 + done +} + +# Info level logging. 
+seal::log::info() { + [[ ${log_level} == "debug" ]] || [[ ${log_level} == "info" ]] || return 0 + local message="${2:-}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[34m[INFO]\033[0m ${timestamp} ${1-}" >&2 + else + echo -e "[INFO] ${timestamp} ${1-}" >&2 + fi + shift 1 + for message; do + echo -e " ${message}" >&2 + done +} + +# Warn level logging. +seal::log::warn() { + local message="${2:-}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[33m[WARN]\033[0m ${timestamp} ${1-}" >&2 + else + echo -e "[WARN] ${timestamp} ${1-}" >&2 + fi + shift 1 + for message; do + echo -e " ${message}" >&2 + done +} + +# Error level logging, log an error but keep going, don't dump the stack or exit. +seal::log::error() { + local message="${2:-}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[31m[ERRO]\033[0m ${timestamp} ${1-}" >&2 + else + echo -e "[ERRO] ${timestamp} ${1-}" >&2 + fi + shift 1 + for message; do + echo -e " ${message}" >&2 + done +} + +# Fatal level logging, log an error but exit with 1, don't dump the stack or exit. +seal::log::fatal() { + local message="${2:-}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[41;33m[FATA]\033[0m ${timestamp} ${1-}" >&2 + else + echo -e "[FATA] ${timestamp} ${1-}" >&2 + fi + shift 1 + for message; do + echo -e " ${message}" >&2 + done + + exit 1 +} + +# Panic level logging, dump the error stack and exit. +# Args: +# $1 Message to log with the error +# $2 The error code to return +# $3 The number of stack frames to skip when printing. 
+seal::log::panic() { + local message="${1:-}" + local code="${2:-1}" + + local timestamp + timestamp="$(date +"[%m%d %H:%M:%S]")" + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[41;33m[FATA]\033[0m ${timestamp} ${message}" >&2 + else + echo -e "[FATA] ${timestamp} ${message}" >&2 + fi + + # print out the stack trace described by $function_stack + if [[ ${#FUNCNAME[@]} -gt 2 ]]; then + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[31m call stack:\033[0m" >&2 + else + echo -e " call stack:" >&2 + fi + local i + for ((i = 1; i < ${#FUNCNAME[@]} - 2; i++)); do + echo -e " ${i}: ${BASH_SOURCE[${i} + 2]}:${BASH_LINENO[${i} + 1]} ${FUNCNAME[${i} + 1]}(...)" >&2 + done + fi + + if [[ ${log_colorful} == "true" ]]; then + echo -e "\033[41;33m[FATA]\033[0m ${timestamp} exiting with status ${code}" >&2 + else + echo -e "[FATA] ${timestamp} exiting with status ${code}" >&2 + fi + + popd >/dev/null 2>&1 || exit "${code}" + exit "${code}" +} diff --git a/hack/lib/style.sh b/hack/lib/style.sh new file mode 100644 index 0000000..9d63f3a --- /dev/null +++ b/hack/lib/style.sh @@ -0,0 +1,321 @@ +#!/usr/bin/env bash + +# ----------------------------------------------------------------------------- +# Lint variables helpers. These functions need the +# following variables: +# +# GOLANGCI_LINT_VERSION - The Golangci-lint version, default is v1.55.2. +# COMMITSAR_VERSION - The Commitsar version, default is v0.20.2. 
+ +golangci_lint_version=${GOLANGCI_LINT_VERSION:-"v1.55.2"} +commitsar_version=${COMMITSAR_VERSION:-"v0.20.2"} + +function seal::lint::golangci_lint::install() { + curl --retry 3 --retry-all-errors --retry-delay 3 -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b "${ROOT_DIR}/.sbin" "${golangci_lint_version}" +} + +function seal::lint::golangci_lint::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::lint::golangci_lint::bin))" ]]; then + if [[ $($(seal::lint::golangci_lint::bin) --version 2>&1 | cut -d " " -f 4 2>&1 | head -n 1) == "${golangci_lint_version#v}" ]]; then + return 0 + fi + fi + + seal::log::info "installing golangci-lint ${golangci_lint_version}" + if seal::lint::golangci_lint::install; then + seal::log::info "golangci_lint $($(seal::lint::golangci_lint::bin) --version 2>&1 | cut -d " " -f 4 2>&1 | head -n 1)" + return 0 + fi + seal::log::error "no golangci-lint available" + return 1 +} + +function seal::lint::golangci_lint::bin() { + local bin="golangci-lint" + if [[ -f "${ROOT_DIR}/.sbin/golangci-lint" ]]; then + bin="${ROOT_DIR}/.sbin/golangci-lint" + fi + echo -n "${bin}" +} + +function seal::lint::run() { + if ! 
seal::lint::golangci_lint::validate; then + seal::log::warn "using go fmt/vet instead golangci-lint" + shift 1 + local fmt_args=() + local vet_args=() + for arg in "$@"; do + if [[ "${arg}" == "--build-tags="* ]]; then + arg="${arg//--build-/-}" + vet_args+=("${arg}") + continue + fi + fmt_args+=("${arg}") + vet_args+=("${arg}") + done + seal::log::debug "go fmt ${fmt_args[*]}" + go fmt "${fmt_args[@]}" + seal::log::debug "go vet ${vet_args[*]}" + go vet "${vet_args[@]}" + return 0 + fi + + seal::log::debug "golangci-lint run --fix $*" + $(seal::lint::golangci_lint::bin) run --fix "$@" +} + +function seal::format::goimports::install() { + GOBIN="${ROOT_DIR}/.sbin" go install github.com/incu6us/goimports-reviser/v3@latest +} + +function seal::format::goimports::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::format::goimports::bin))" ]]; then + return 0 + fi + + seal::log::info "installing goimports" + if seal::format::goimports::install; then + return 0 + fi + seal::log::error "no goimports-reviser available" + return 1 +} + +function seal::format::goimports::bin() { + local bin="goimports-reviser" + if [[ -f "${ROOT_DIR}/.sbin/goimports-reviser" ]]; then + bin="${ROOT_DIR}/.sbin/goimports-reviser" + fi + echo -n "${bin}" +} + +function seal::format::gofumpt::install() { + GOBIN="${ROOT_DIR}/.sbin" go install mvdan.cc/gofumpt@latest +} + +function seal::format::gofumpt::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::format::gofumpt::bin))" ]]; then + return 0 + fi + + seal::log::info "installing gofumpt" + if seal::format::gofumpt::install; then + return 0 + fi + seal::log::error "no gofumpt available" + return 1 +} + +function seal::format::gofumpt::bin() { + local bin="gofumpt" + if [[ -f "${ROOT_DIR}/.sbin/gofumpt" ]]; then + bin="${ROOT_DIR}/.sbin/gofumpt" + fi + echo -n "${bin}" +} + +# install golines +function seal::format::golines::install() { + GOBIN="${ROOT_DIR}/.sbin" go install 
github.com/segmentio/golines@latest +} + +function seal::format::golines::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::format::golines::bin))" ]]; then + return 0 + fi + + seal::log::info "installing golines" + if seal::format::golines::install; then + return 0 + fi + seal::log::error "no golines available" + return 1 +} + +function seal::format::golines::bin() { + local bin="golines" + if [[ -f "${ROOT_DIR}/.sbin/golines" ]]; then + bin="${ROOT_DIR}/.sbin/golines" + fi + echo -n "${bin}" +} + +# install wsl(Whitespace Linter) +function seal::format::wsl::install() { + GOBIN="${ROOT_DIR}/.sbin" go install github.com/bombsimon/wsl/v4/cmd...@master +} + +function seal::format::wsl::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::format::wsl::bin))" ]]; then + return 0 + fi + + seal::log::info "installing wsl" + if seal::format::wsl::install; then + return 0 + fi + seal::log::error "no wsl available" + return 1 +} + +function seal::format::wsl::bin() { + local bin="wsl" + if [[ -f "${ROOT_DIR}/.sbin/wsl" ]]; then + bin="${ROOT_DIR}/.sbin/wsl" + fi + echo -n "${bin}" +} + +function seal::format::run() { + local path=$1 + shift 1 + # shellcheck disable=SC2206 + local path_ignored=(${*}) + + # goimports + if ! seal::format::goimports::validate; then + seal::log::fatal "cannot execute goimports as client is not found" + fi + + # shellcheck disable=SC2155 + local goimports_opts=( + "-rm-unused" + "-set-alias" + "-use-cache" + "-imports-order=std,general,company,project,blanked,dotted" + "-output=file" + ) + set +e + if [[ ${#path_ignored[@]} -gt 0 ]]; then + seal::log::debug "pushd ${path}; go list -f \"{{.Dir}}\" ./... | grep -v -E \"$(seal::util::join_array "|" "${path_ignored[@]}")\" | xargs goimports-reviser ${goimports_opts[*]}; popd" + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + go list -f "{{.Dir}}" ./... 
| grep -v -E "$(seal::util::join_array "|" "${path_ignored[@]}")" | xargs "$(seal::format::goimports::bin)" "${goimports_opts[@]}" + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 + else + seal::log::debug "pushd ${path}; go list -f \"{{.Dir}}\" ./... | xargs goimports-reviser ${goimports_opts[*]}; popd" + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + go list -f "{{.Dir}}" ./... | xargs "$(seal::format::goimports::bin)" "${goimports_opts[@]}" + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 + fi + set -e + + # gofmt interface{} -> any + local gofmt_opts=( + "-w" + "-r" + "interface{} -> any" + "${path}" + ) + + seal::log::debug "gofmt ${gofmt_opts[*]}" + gofmt "${gofmt_opts[@]}" + + # golines + if ! seal::format::golines::validate; then + seal::log::fatal "cannot execute golines as client is not found" + fi + + # gofumpt for golines base-formatter + if ! seal::format::gofumpt::validate; then + seal::log::fatal "cannot execute gofumpt as client is not found" + fi + + local golines_opts=( + "-w" + "--max-len=120" + "--no-reformat-tags" + "--ignore-generated" # file start with generated_ + "--ignored-dirs=.git" + "--ignored-dirs=node_modules" + "--ignored-dirs=vendor" + ) + for ig in "${path_ignored[@]}"; do + golines_opts+=("--ignored-dirs=${ig}") + done + golines_opts+=( + "--base-formatter=$(seal::format::gofumpt::bin) -extra" # format by gofumpt + "${path}" + ) + seal::log::debug "golines ${golines_opts[*]}" + $(seal::format::golines::bin) "${golines_opts[@]}" + + # wsl + if ! seal::format::wsl::validate; then + seal::log::fatal "cannot execute wsl as client is not found" + fi + + local wsl_opts=( + "--allow-assign-and-anything" + "--allow-trailing-comment" + "--force-short-decl-cuddling=false" + "--fix" + ) + set +e + if [[ ${#path_ignored[@]} -gt 0 ]]; then + seal::log::debug "pushd ${path}; go list ./... 
| grep -v -E \"$(seal::util::join_array "|" "${path_ignored[@]}")\" | xargs wsl ${wsl_opts[*]}; popd" + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + go list ./... | grep -v -E "$(seal::util::join_array "|" "${path_ignored[@]}")" | xargs "$(seal::format::wsl::bin)" "${wsl_opts[@]}" >/dev/null 2>&1 + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 + else + seal::log::debug "pushd ${path}; go list ./... | xargs wsl ${wsl_opts[*]}; popd" + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + go list ./... | xargs "$(seal::format::wsl::bin)" "${wsl_opts[@]}" + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 + fi + set -e +} + +function seal::commit::commitsar::install() { + local os + os="$(seal::util::get_raw_os)" + local arch + arch="$(seal::util::get_raw_arch)" + curl --retry 3 --retry-all-errors --retry-delay 3 \ + -o /tmp/commitsar.tar.gz \ + -sSfL "https://github.com/aevea/commitsar/releases/download/${commitsar_version}/commitsar_${commitsar_version#v}_${os}_${arch}.tar.gz" + tar -zxvf /tmp/commitsar.tar.gz \ + --directory "${ROOT_DIR}/.sbin" \ + --no-same-owner \ + --exclude ./LICENSE \ + --exclude ./README.md + chmod a+x "${ROOT_DIR}/.sbin/commitsar" +} + +function seal::commit::commitsar::validate() { + # shellcheck disable=SC2046 + if [[ -n "$(command -v $(seal::commit::commitsar::bin))" ]]; then + if [[ $($(seal::commit::commitsar::bin) version 2>&1 | cut -d " " -f 7 2>&1 | head -n 1 | xargs echo -n) == "${commitsar_version#v}" ]]; then + return 0 + fi + fi + + seal::log::info "installing commitsar ${commitsar_version}" + if seal::commit::commitsar::install; then + seal::log::info "commitsar $($(seal::commit::commitsar::bin) version 2>&1 | cut -d " " -f 7 2>&1 | head -n 1 | xargs echo -n)" + return 0 + fi + seal::log::error "no commitsar available" + return 1 +} + +function seal::commit::commitsar::bin() { + local bin="commitsar" + if [[ -f "${ROOT_DIR}/.sbin/commitsar" ]]; then + 
bin="${ROOT_DIR}/.sbin/commitsar" + fi + echo -n "${bin}" +} + +function seal::commit::lint() { + if ! seal::commit::commitsar::validate; then + seal::log::fatal "cannot execute commitsar as client is not found" + fi + + seal::log::debug "commitsar $*" + $(seal::commit::commitsar::bin) "$@" +} diff --git a/hack/lib/target.sh b/hack/lib/target.sh new file mode 100644 index 0000000..d277c8b --- /dev/null +++ b/hack/lib/target.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +function seal::target::build_prefix() { + local prefix + prefix="$(basename "${ROOT_DIR}")" + + if [[ -n "${BUILD_PREFIX:-}" ]]; then + echo -n "${BUILD_PREFIX}" + else + echo -n "${prefix}" + fi +} + +readonly DEFAULT_BUILD_TAGS=( + "netgo" + "jsoniter" +) + +function seal::target::build_tags() { + local target="${1:-}" + + local tags + if [[ -n "${BUILD_TAGS:-}" ]]; then + IFS="," read -r -a tags <<<"${BUILD_TAGS}" + else + case "${target}" in + utils) + tags=() + ;; + *) + tags=("${DEFAULT_BUILD_TAGS[@]}") + ;; + esac + fi + + if [[ ${#tags[@]} -ne 0 ]]; then + echo -n "${tags[@]}" + fi +} + +readonly DEFAULT_BUILD_PLATFORMS=( + linux/amd64 + linux/arm64 + darwin/amd64 + darwin/arm64 + windows/amd64 +) + +function seal::target::build_platforms() { + local target="${1:-}" + + local platforms + if [[ -z "${OS:-}" ]] && [[ -z "${ARCH:-}" ]]; then + platforms=("${DEFAULT_BUILD_PLATFORMS[@]}") + else + local os="${OS:-$(seal::util::get_raw_os)}" + local arch="${ARCH:-$(seal::util::get_raw_arch)}" + platforms=("${os}/${arch}") + fi + + if [[ ${#platforms[@]} -ne 0 ]]; then + echo -n "${platforms[@]}" + fi +} diff --git a/hack/lib/util.sh b/hack/lib/util.sh new file mode 100644 index 0000000..4700a12 --- /dev/null +++ b/hack/lib/util.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +function seal::util::find_subdirs() { + local path="$1" + if [[ -z "$path" ]]; then + path="./" + fi + # shellcheck disable=SC2010 + ls -l "$path" | grep "^d" | awk '{print $NF}' | xargs echo +} + +function 
seal::util::is_empty_dir() { + local path="$1" + if [[ ! -d "${path}" ]]; then + return 0 + fi + + # shellcheck disable=SC2012 + if [[ $(ls "${path}" | wc -l) -eq 0 ]]; then + return 0 + fi + return 1 +} + +function seal::util::join_array() { + local IFS="$1" + shift 1 + echo "$*" +} + +function seal::util::get_os() { + local os + if go env GOOS >/dev/null 2>&1; then + os=$(go env GOOS) + else + os=$(echo -n "$(uname -s)" | tr '[:upper:]' '[:lower:]') + fi + + case ${os} in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + + echo -n "${os}" +} + +function seal::util::get_raw_os() { + local os + os=$(echo -n "$(uname -s)" | tr '[:upper:]' '[:lower:]') + + case ${os} in + cygwin_nt*) os="windows" ;; + mingw*) os="windows" ;; + msys_nt*) os="windows" ;; + esac + + echo -n "${os}" +} + +function seal::util::get_arch() { + local arch + if go env GOARCH >/dev/null 2>&1; then + arch=$(go env GOARCH) + if [[ "${arch}" == "arm" ]]; then + arch="${arch}v$(go env GOARM)" + fi + else + arch=$(uname -m) + fi + + case ${arch} in + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) + if [[ "${1:-}" == "--full-name" ]]; then + arch="armv7" + else + arch="arm" + fi + ;; + aarch64) arch="arm64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + x86_64) arch="amd64" ;; + esac + + echo -n "${arch}" +} + +function seal::util::get_raw_arch() { + local arch + arch=$(uname -m) + + case ${arch} in + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) + if [[ "${1:-}" == "--full-name" ]]; then + arch="armv7" + else + arch="arm" + fi + ;; + aarch64) arch="arm64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + x86_64) arch="amd64" ;; + esac + + echo -n "${arch}" +} + +function seal::util::get_random_port_start() { + local offset="${1:-1}" + if [[ ${offset} -le 0 ]]; then + offset=1 + fi + + while true; do + random_port=$((RANDOM % 10000 + 50000)) + for ((i = 0; i < offset; i++)); do + if nc -z 
127.0.0.1 $((random_port + i)); then + random_port=0 + break + fi + done + + if [[ ${random_port} -ne 0 ]]; then + echo -n "${random_port}" + break + fi + done +} + +function seal::util::sed() { + if ! sed -i "$@" >/dev/null 2>&1; then + # back off none GNU sed + sed -i "" "$@" + fi +} + +function seal::util::decode64() { + if [[ $# -eq 0 ]]; then + cat | base64 --decode + else + printf '%s' "$1" | base64 --decode + fi +} + +function seal::util::encode64() { + if [[ $# -eq 0 ]]; then + cat | base64 + else + printf '%s' "$1" | base64 + fi +} + +function seal::util::kill_jobs() { + for job in $(jobs -p); do + kill -9 "$job" + done +} + +function seal::util::wait_jobs() { + trap seal::util::kill_jobs TERM INT + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + return ${fail} +} + +function seal::util::dismiss() { + echo "" 1>/dev/null 2>&1 +} diff --git a/hack/lib/version.sh b/hack/lib/version.sh new file mode 100644 index 0000000..5dba5bb --- /dev/null +++ b/hack/lib/version.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +## +# Inspired by github.com/kubernetes/kubernetes/hack/lib/version.sh +## + +# ----------------------------------------------------------------------------- +# Version management helpers. These functions help to set the +# following variables: +# +# GIT_TREE_STATE - "clean" indicates no changes since the git commit id. +# "dirty" indicates source code changes after the git commit id. +# "archive" indicates the tree was produced by 'git archive'. +# "unknown" indicates cannot find out the git tree. +# GIT_COMMIT - The git commit id corresponding to this +# source code. +# GIT_VERSION - "vX.Y" used to indicate the last release version, +# it can be specified via "VERSION". +# BUILD_DATE - The build date of the version. 
+ +function seal::version::get_version_vars() { + BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ') + GIT_TREE_STATE="unknown" + GIT_COMMIT="unknown" + GIT_VERSION="unknown" + + # get the git tree state if the source was exported through git archive. + # shellcheck disable=SC2016,SC2050 + if [[ '$Format:%%$' == "%" ]]; then + GIT_TREE_STATE="archive" + GIT_COMMIT='$Format:%H$' + # when a 'git archive' is exported, the '$Format:%D$' below will look + # something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: ' + # can be extracted from it. + if [[ '$Format:%D$' =~ tag:\ (v[^ ,]+) ]]; then + GIT_VERSION="${BASH_REMATCH[1]}" + else + GIT_VERSION="${GIT_COMMIT:0:7}" + fi + # respect specified version. + GIT_VERSION="${VERSION:-${GIT_VERSION}}" + return + fi + + # return directly if not found git client. + if [[ -z "$(command -v git)" ]]; then + # respect specified version. + GIT_VERSION=${VERSION:-${GIT_VERSION}} + return + fi + + # find out git info via git client. + if GIT_COMMIT=$(git rev-parse "HEAD^{commit}" 2>/dev/null); then + # specify as dirty if the tree is not clean. + if git_status=$(git status --porcelain 2>/dev/null) && [[ -n ${git_status} ]]; then + GIT_TREE_STATE="dirty" + else + GIT_TREE_STATE="clean" + fi + + # specify with the tag if the head is tagged. + if GIT_VERSION="$(git rev-parse --abbrev-ref HEAD 2>/dev/null)"; then + if git_tag=$(git tag -l --contains HEAD 2>/dev/null | head -n 1 2>/dev/null) && [[ -n ${git_tag} ]]; then + GIT_VERSION="${git_tag}" + fi + fi + + # specify to dev if the tree is dirty. + if [[ "${GIT_TREE_STATE:-dirty}" == "dirty" ]]; then + GIT_VERSION="dev" + fi + + # respect specified version + GIT_VERSION=${VERSION:-${GIT_VERSION}} + + if ! 
[[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then + GIT_VERSION="dev" + fi + + fi +} diff --git a/hack/lint.sh b/hack/lint.sh new file mode 100755 index 0000000..7478074 --- /dev/null +++ b/hack/lint.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +source "${ROOT_DIR}/hack/lib/init.sh" + +function check_dirty() { + [[ "${LINT_DIRTY:-false}" == "true" ]] || return 0 + + if [[ -n "$(command -v git)" ]]; then + if git_status=$(git status --porcelain 2>/dev/null) && [[ -n ${git_status} ]]; then + seal::log::fatal "the git tree is dirty:\n$(git status --porcelain)" + fi + fi +} + +function lint() { + local target="$1" + local path="$2" + local path_ignored="$3" + + local build_tags=() + read -r -a build_tags <<<"$(seal::target::build_tags "${target}")" + + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + + seal::format::run "${path}" "${path_ignored}" + if [[ ${#build_tags[@]} -gt 0 ]]; then + GOLANGCI_LINT_CACHE="$(go env GOCACHE)" seal::lint::run --build-tags="\"${build_tags[*]}\"" "${path}/..." + else + GOLANGCI_LINT_CACHE="$(go env GOCACHE)" seal::lint::run "${path}/..." + fi + + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 +} + +function after() { + check_dirty +} + +# +# main +# + +seal::log::info "+++ LINT +++" + +seal::commit::lint "${ROOT_DIR}" + +lint "kubecia" "${ROOT_DIR}" "" "$@" + +after + +seal::log::info "--- LINT ---" diff --git a/hack/release.sh b/hack/release.sh new file mode 100755 index 0000000..fe0b621 --- /dev/null +++ b/hack/release.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd -P)" +source "${ROOT_DIR}/hack/lib/init.sh" + +BUILD_DIR="${ROOT_DIR}/.dist/build" +mkdir -p "${BUILD_DIR}" + +function release() { + local target="$1" + + local checksum_path="${BUILD_DIR}/${target}/SHA256SUMS" + shasum -a 256 "${BUILD_DIR}/${target}"/* | sed -e "s#${BUILD_DIR}/${target}/##g" >"${checksum_path}" + if [[ -n "${GPG_FINGERPRINT:-}" ]]; then + gpg --batch --local-user "${GPG_FINGERPRINT}" --detach-sign "${checksum_path}" + else + gpg --batch --detach-sign "${checksum_path}" + fi +} + +# +# main +# + +seal::log::info "+++ RELEASE +++" "tag: ${GIT_VERSION}" + +release "kubecia" "${ROOT_DIR}" "$@" + +seal::log::info "--- RELEASE ---" diff --git a/hack/test.sh b/hack/test.sh new file mode 100755 index 0000000..e8592fe --- /dev/null +++ b/hack/test.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)" +source "${ROOT_DIR}/hack/lib/init.sh" + +TEST_DIR="${ROOT_DIR}/.dist/test" +mkdir -p "${TEST_DIR}" + +function test() { + local target="$1" + local path="$2" + + [[ "${path}" == "${ROOT_DIR}" ]] || pushd "${path}" >/dev/null 2>&1 + + local tags=() + # shellcheck disable=SC2086 + IFS=" " read -r -a tags <<<"$(seal::target::build_tags ${target})" + + CGO_ENABLED=1 go test \ + -v \ + -failfast \ + -race \ + -cover \ + -timeout=10m \ + -tags="${tags[*]}" \ + -coverprofile="${TEST_DIR}/${target}-coverage.out" \ + "${path}/..." + + [[ "${path}" == "${ROOT_DIR}" ]] || popd >/dev/null 2>&1 +} + +function dispatch() { + local target="$1" + local path="$2" + + shift 2 + local specified_targets="$*" + if [[ -n ${specified_targets} ]] && [[ ! 
${specified_targets} =~ ${target} ]]; then + return + fi + + seal::log::debug "testing ${target}" + if [[ "${PARALLELIZE:-true}" == "false" ]]; then + test "${target}" "${path}" + else + test "${target}" "${path}" & + fi +} + +# +# main +# + +seal::log::info "+++ TEST +++" + +dispatch "kubecia" "${ROOT_DIR}" "$@" + +if [[ "${PARALLELIZE:-true}" == "true" ]]; then + seal::util::wait_jobs || seal::log::fatal "--- TEST ---" +fi +seal::log::info "--- TEST ---" diff --git a/main.go b/main.go new file mode 100644 index 0000000..c5c5f1b --- /dev/null +++ b/main.go @@ -0,0 +1,174 @@ +package main + +import ( + "flag" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + klog "k8s.io/klog/v2" + "k8s.io/utils/set" + + "github.com/seal-io/kubecia/cmd/apis" + "github.com/seal-io/kubecia/cmd/plugins" + "github.com/seal-io/kubecia/pkg/signal" + "github.com/seal-io/kubecia/pkg/version" +) + +func init() { + // Adjust Logger. + klog.InitFlags(nil) + pflag.CommandLine.AddGoFlag(flag.CommandLine.Lookup("v")) + pflag.CommandLine.AddGoFlag(flag.CommandLine.Lookup("logtostderr")) + _ = pflag.CommandLine.Set("logtostderr", "true") +} + +func main() { + debugArgs := pflag.Bool( + "debug-args", + false, + "debug arguments, which prints all running arguments, only for development", + ) + + rc := &cobra.Command{ + Use: "kubecia", + Short: `Kubecia is an available the client-go credential (exec) plugin, no Cloud Provider CLI required.`, + SilenceUsage: true, + Version: version.Get(), + PersistentPreRunE: func(c *cobra.Command, args []string) error { + if !*debugArgs { + return nil + } + + if len(os.Args) > 1 && filepath.Base(os.Args[0]) == os.Args[1] { + c.Printf("%s %s\n\n", os.Args[0], strings.Join(os.Args[2:], " ")) + return nil + } + + c.Printf("%s\n\n", strings.Join(os.Args, " ")) + return nil + }, + RunE: func(c *cobra.Command, args []string) error { + return c.Help() + }, + } + + // Add Commands. 
+ plugins.AddCommands(rc) + apis.AddCommands(rc) + + // Retrieve arguments from environment variables. + retrieveArguments(rc) + + // Set output. + rc.SetOut(os.Stdout) + + // Execute. + if err := rc.ExecuteContext(signal.Context()); err != nil { + os.Exit(1) + } +} + +func retrieveArguments(rc *cobra.Command) { + const ( + argPrefix = "--" + envKeyPrefix = "KUBECIA_" + ) + + cmd := filepath.Base(os.Args[0]) + + var sc *cobra.Command + + for _, c := range rc.Commands() { + if c.Name() == cmd { + if len(os.Args) > 1 && !strings.HasPrefix(os.Args[1], argPrefix) { + break + } + + newArgs := make([]string, len(os.Args)+1) + newArgs[0] = os.Args[0] + newArgs[1] = cmd + copy(newArgs[2:], os.Args[1:]) + os.Args = newArgs + + sc = c + + break + } + } + + if len(os.Args) < 2 { + return + } + + if sc == nil { + for _, c := range rc.Commands() { + if c.Name() == os.Args[1] { + sc = c + break + } + } + } + + var ( + envPrefix = envKeyPrefix + flags = rc.Flags() + ) + + if sc != nil { + envPrefix = envPrefix + strings.ToUpper(sc.Name()) + "_" + flags = sc.Flags() + } + + igns := set.New("help", "v", "version") + sets := set.New[string]() + + for _, v := range os.Args[2:] { + if strings.HasPrefix(v, argPrefix) { + vs := strings.SplitN(v, "=", 2) + sets.Insert(vs[0]) + } + } + + envArgs := make([]string, 0, len(os.Environ())*2) + + for _, v := range os.Environ() { + if v2 := strings.TrimPrefix(v, envPrefix); v == v2 { + continue + } else { + v = v2 + } + + vs := strings.SplitN(v, "=", 2) + if len(vs) != 2 { + continue + } + + var ( + fn = strings.ReplaceAll(strings.ToLower(vs[0]), "_", "-") + ek = argPrefix + fn + ) + + if igns.Has(fn) || flags.Lookup(fn) == nil || sets.Has(ek) { + continue + } + + ev := vs[1] + if ev2 := os.ExpandEnv(ev); ev2 != "" && ev != ev2 { + ev = ev2 + } + + envArgs = append(envArgs, ek, ev) + } + + if len(envArgs) == 0 { + return + } + + newArgs := make([]string, len(os.Args)+len(envArgs)) + copy(newArgs, os.Args) + copy(newArgs[len(os.Args):], envArgs) 
+ os.Args = newArgs +} diff --git a/pkg/apis/client.go b/pkg/apis/client.go new file mode 100644 index 0000000..433f8b5 --- /dev/null +++ b/pkg/apis/client.go @@ -0,0 +1,18 @@ +package apis + +import ( + "context" + "net" + "net/http" + "time" +) + +func Client(sock string) *http.Client { + return &http.Client{ + Transport: &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.DialTimeout("unix", sock, 1*time.Second) + }, + }, + } +} diff --git a/pkg/apis/router.go b/pkg/apis/router.go new file mode 100644 index 0000000..0d5ddc4 --- /dev/null +++ b/pkg/apis/router.go @@ -0,0 +1,17 @@ +package apis + +import ( + "path" +) + +func RoutePrefix(namespace string) string { + return path.Join("/", namespace) + "/" +} + +func Route(namespace string, paths ...string) string { + ps := make([]string, 0, len(paths)+3) + ps = append(ps, "http", ".", namespace) + ps = append(ps, paths...) + + return path.Join(ps...) +} diff --git a/pkg/apis/server/server.go b/pkg/apis/server/server.go new file mode 100644 index 0000000..c31b85f --- /dev/null +++ b/pkg/apis/server/server.go @@ -0,0 +1,139 @@ +package server + +import ( + "context" + "errors" + "fmt" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/consts" +) + +type ( + ServeOptions struct { + Cache cache.Cache + } + + ServeFunc = func(context.Context, *http.ServeMux, ServeOptions) error + ServeFuncs = []ServeFunc + + Server struct { + Socket string + ServeFuncs ServeFuncs + } +) + +func (s *Server) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&s.Socket, "socket", consts.SocketPath(), "Socket path") +} + +func (s *Server) Serve(ctx context.Context) error { + ls, err := newUnixListener(s.Socket) + if err != nil { + return fmt.Errorf("error creating unix listener %s: %w", s.Socket, err) + } + + defer func() { + _ = 
ls.Close() + }() + + c, err := cache.NewMemory(ctx) + if err != nil { + return fmt.Errorf("error creating cache: %w", err) + } + + defer func() { + _ = c.Close() + }() + + m := http.NewServeMux() + o := ServeOptions{ + Cache: cache.WithSingleFlight(c), + } + + for i := range s.ServeFuncs { + err = s.ServeFuncs[i](ctx, m, o) + if err != nil { + return fmt.Errorf("error serving: %w", err) + } + } + + logger := klog.LoggerWithName(klog.Background(), "http") + + srv := http.Server{ + Handler: m, + ReadTimeout: 1 * time.Second, + WriteTimeout: 5 * time.Second, + ErrorLog: log.New(httpLogger(logger), "", 0), + } + + go func() { + <-ctx.Done() + + _ = srv.Close() + }() + + err = srv.Serve(ls) + if err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + return nil +} + +func (s *Server) Register(f ServeFunc) { + if f == nil { + return + } + + s.ServeFuncs = append(s.ServeFuncs, f) +} + +func newUnixListener(sock string) (net.Listener, error) { + err := os.MkdirAll(filepath.Dir(sock), 0o700) + if err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("error creating unix socket dir: %w", err) + } + + err = syscall.Unlink(sock) + if err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("error unlinking unix socket: %w", err) + } + + ls, err := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: sock}) + if err != nil { + return nil, fmt.Errorf("error creating unix socket listener: %w", err) + } + + err = os.Chmod(sock, 0o777) + if err != nil { + _ = ls.Close() + return nil, fmt.Errorf("error chmoding unix socket: %w", err) + } + + return ls, nil +} + +type httpLogger klog.Logger + +func (l httpLogger) Write(p []byte) (int, error) { + // Trim the trailing newline. 
+ s := strings.TrimSuffix(string(p), "\n") + + if !strings.Contains(s, "broken pipe") { + klog.Logger(l).Info(s) + } + + return len(p), nil +} diff --git a/pkg/bytespool/pool.go b/pkg/bytespool/pool.go new file mode 100644 index 0000000..7648bb5 --- /dev/null +++ b/pkg/bytespool/pool.go @@ -0,0 +1,61 @@ +package bytespool + +import ( + "bytes" + "sync" +) + +const defaultBytesSize = 32 * 1024 + +var ( + bytesPool = sync.Pool{ + New: func() any { + b := make([]byte, defaultBytesSize) + return &b + }, + } + + bufferPool = sync.Pool{ + New: func() any { + return bytes.NewBuffer(GetBytes(0)) + }, + } +) + +func GetBytes(length int) []byte { + var ( + bsp = bytesPool.Get().(*[]byte) + bs = *bsp + ) + + if length <= 0 { + length = defaultBytesSize + } + + if cap(bs) >= length { + return bs[:length] + } + + bytesPool.Put(bsp) + + return make([]byte, length) +} + +func GetBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func Put(b any) { + switch t := b.(type) { + case *[]byte: + bytesPool.Put(t) + case []byte: + bytesPool.Put(&t) + case *bytes.Buffer: + t.Reset() + bufferPool.Put(t) + case bytes.Buffer: + t.Reset() + bufferPool.Put(&t) + } +} diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go new file mode 100644 index 0000000..2b148d5 --- /dev/null +++ b/pkg/cache/cache.go @@ -0,0 +1,30 @@ +package cache + +import ( + "context" + "errors" + "io" +) + +var ( + ErrEntryNotFound = errors.New("entry is not found") + ErrEntryTooBig = errors.New("entry is too big") +) + +// Cache holds the action of caching. +type Cache interface { + io.Closer + + Name() string + + // Set saves entry with the given key, + // it returns an ErrEntryTooBig when entry is too big. + Set(ctx context.Context, key string, entry []byte) error + + // Delete removes the given key. + Delete(ctx context.Context, key string) ([]byte, error) + + // Get reads entry for the given key, + // it returns an ErrEntryNotFound when no entry exists for the given key. 
+ Get(ctx context.Context, key string) ([]byte, error) +} diff --git a/pkg/cache/file.go b/pkg/cache/file.go new file mode 100644 index 0000000..2fbddc0 --- /dev/null +++ b/pkg/cache/file.go @@ -0,0 +1,291 @@ +package cache + +import ( + "context" + "errors" + "fmt" + "hash/fnv" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/dustin/go-humanize" + "github.com/spf13/afero" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/consts" +) + +// FileConfig holds the configuration of the filesystem cache, +// entry indexes by key and stores in one file. +type FileConfig struct { + // Namespace indicates the operating workspace. + Namespace string + // EntryMaxAge indicates the maximum lifetime of each entry, + // default is 15 mins. + EntryMaxAge time.Duration + // LazyEntryEviction indicates to evict an expired entry at next peeking, + // by default, a background looping tries to evict expired entries per 3 mins. + LazyEntryEviction bool + // Buckets indicates the bucket number of cache, + // value must be a power of two, + // default is 12. + Buckets int +} + +func (c *FileConfig) Default() { + c.Namespace = strings.TrimSpace(c.Namespace) + if c.EntryMaxAge == 0 { + c.EntryMaxAge = 15 * time.Minute + } + + if c.Buckets == 0 { + c.Buckets = 12 + } +} + +func (c *FileConfig) Validate() error { + if c.EntryMaxAge < 0 { + return errors.New("invalid entry max age: negative") + } + + if c.Buckets < 0 { + return errors.New("invalid buckets: negative") + } + + return nil +} + +// NewFile returns a filesystem Cache implementation. +func NewFile(ctx context.Context) (Cache, error) { + return NewFileWithConfig(ctx, FileConfig{}) +} + +// MustNewFile likes NewFile, but panic if error found. 
+func MustNewFile(ctx context.Context) Cache { + n, err := NewFile(ctx) + if err != nil { + panic(fmt.Errorf("error creating filesystem cache: %w", err)) + } + + return n +} + +const ( + dirPerm = 0o700 + filePerm = 0o600 + pathSep = string(filepath.Separator) +) + +// NewFileWithConfig returns a filesystem Cache implementation with given configuration. +func NewFileWithConfig(ctx context.Context, cfg FileConfig) (Cache, error) { + // Default, validate. + cfg.Default() + + err := cfg.Validate() + if err != nil { + return nil, err + } + + logger := klog.LoggerWithName(klog.Background(), "cache.file") + + // Prepare directories. + dataDir := consts.DataDir() + if err = os.MkdirAll(dataDir, dirPerm); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("error creating data dir: %w", err) + } + + for i := 0; i < cfg.Buckets; i++ { + bucketDir := filepath.Join(dataDir, strconv.FormatInt(int64(i), 10)) + if err = os.MkdirAll(bucketDir, dirPerm); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("error creating bucket dir: %w", err) + } + } + + // Init. + underlay := afero.NewBasePathFs(afero.NewOsFs(), dataDir) + + if !cfg.LazyEntryEviction { + go func() { + _ = wait.PollUntilContextCancel(ctx, 3*time.Minute, true, func(ctx context.Context) (bool, error) { + _ = afero.Walk(underlay, pathSep, func(path string, fi os.FileInfo, _ error) error { + if fi.IsDir() { + return nil + } + + if !fi.ModTime().Local().Add(cfg.EntryMaxAge).Before(time.Now()) { + return nil + } + + err := underlay.Remove(path) + if err != nil && !os.IsNotExist(err) { + logger.Error(err, "error evicting expired entry", "path", path) + } + + return nil + }) + + return false, nil + }) + }() + } + + fc := fileCache{ + logger: logger, + underlay: underlay, + bucket: uint64(cfg.Buckets), + namespace: cfg.Namespace, + expiration: cfg.EntryMaxAge, + lazyEvict: cfg.LazyEntryEviction, + } + + return fc, nil +} + +// MustNewFileWithConfig likes NewFileWithConfig, but panic if error found. 
+func MustNewFileWithConfig(ctx context.Context, cfg FileConfig) Cache { + n, err := NewFileWithConfig(ctx, cfg) + if err != nil { + panic(fmt.Errorf("error creating filesystem cache: %w", err)) + } + + return n +} + +// fileCache adapts Cache interface to implement a filesystem cache with afero.Fs. +type fileCache struct { + logger klog.Logger + underlay afero.Fs + bucket uint64 + namespace string + expiration time.Duration + lazyEvict bool +} + +func (c fileCache) wrapKey(s *string) *string { + r := filepath.Join(pathSep, c.namespace, *s) + + h := fnv.New64a() + _, _ = h.Write([]byte(r)) + p := strconv.FormatUint(h.Sum64()%c.bucket, 10) + + r = filepath.Join(pathSep, p, r) + + return &r +} + +func (c fileCache) Close() error { + return nil +} + +func (c fileCache) Name() string { + return "file" +} + +func (c fileCache) Set(ctx context.Context, key string, entry []byte) error { + wk := c.wrapKey(&key) + + err := c.underlay.MkdirAll(filepath.Dir(*wk), dirPerm) + if err != nil && !os.IsExist(err) { + return err + } + + err = afero.WriteFile(c.underlay, *wk, entry, filePerm) + if err != nil && !os.IsExist(err) { + return wrapFileError(err) + } + + err = c.underlay.Chtimes(*wk, time.Now(), time.Now()) + if err != nil { + return wrapFileError(err) + } + + if lg := c.logger.V(5); lg.Enabled() { + lg.Info("set", + "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + + return nil +} + +func (c fileCache) Delete(ctx context.Context, key string) ([]byte, error) { + wk := c.wrapKey(&key) + + entry, err := afero.ReadFile(c.underlay, *wk) + if err != nil { + return nil, wrapFileError(err) + } + + if !c.lazyEvict { + err = c.underlay.Chtimes(*wk, time.Now(), time.Now().Add(-c.expiration)) + } else { + err = c.underlay.Remove(*wk) + } + + if err != nil { + return nil, wrapFileError(err) + } + + if lg := c.logger.V(5); err == nil && lg.Enabled() { + lg.Info("deleted", + "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + + return entry, nil +} + +func (c 
fileCache) Get(ctx context.Context, key string) ([]byte, error) {
+	wk := c.wrapKey(&key)
+
+	fi, err := c.underlay.Stat(*wk)
+	if err != nil {
+		if os.IsNotExist(err) {
+			c.logger.V(5).Info("missed", "key", key)
+		}
+
+		return nil, wrapFileError(err)
+	}
+
+	// Expired entry: treat as a miss, and reclaim it now when lazy eviction
+	// is enabled (otherwise the background loop reclaims it later).
+	if fi.ModTime().Local().Add(c.expiration).Before(time.Now()) {
+		if c.lazyEvict {
+			_ = c.underlay.Remove(*wk)
+		}
+
+		c.logger.V(5).Info("missed", "key", key)
+
+		return nil, ErrEntryNotFound
+	}
+
+	entry, err := afero.ReadFile(c.underlay, *wk)
+	if err != nil {
+		if os.IsNotExist(err) {
+			c.logger.V(5).Info("missed", "key", key)
+		}
+
+		return nil, wrapFileError(err)
+	}
+
+	// err is always nil at this point, the check above returned early.
+	if lg := c.logger.V(5); lg.Enabled() {
+		lg.Info("hit",
+			"key", key, "size", humanize.IBytes(uint64(len(entry))))
+	}
+
+	return entry, nil
+}
+
+// wrapFileError normalizes filesystem errors into the cache package's
+// sentinel errors where possible.
+func wrapFileError(err error) error {
+	switch {
+	case err == nil:
+		return nil
+	case os.IsNotExist(err):
+		return ErrEntryNotFound
+	case errors.Is(err, io.ErrShortWrite):
+		return ErrEntryTooBig
+	}
+
+	return err
+}
diff --git a/pkg/cache/memory.go b/pkg/cache/memory.go
new file mode 100644
index 0000000..a17868c
--- /dev/null
+++ b/pkg/cache/memory.go
@@ -0,0 +1,275 @@
+package cache
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/allegro/bigcache/v3"
+	"github.com/dustin/go-humanize"
+	"k8s.io/klog/v2"
+)
+
+// MemoryConfig holds the configuration of the in-memory cache,
+// entry indexes by key and stores in one bucket,
+// the total cache size is BucketCapacity * Buckets.
+type MemoryConfig struct {
+	// Namespace indicates the operating workspace.
+	Namespace string
+	// EntryMaxAge indicates the maximum lifetime of each entry,
+	// default is 15 mins.
+	EntryMaxAge time.Duration
+	// LazyEntryEviction indicates to evict an expired entry at next peeking,
+	// by default, a background looping tries to evict expired entries per 3 mins.
+	LazyEntryEviction bool
+	// Buckets indicates the bucket number of cache,
+	// value must be a power of two,
+	// default is 64.
+	Buckets int
+	// BucketCapacity indicates the maximum MB of each bucket,
+	// default is 1 MB.
+	BucketCapacity int
+	// LazyBucketCapacityScale indicates to scale when the current bucket is not enough to put a new entry,
+	// by default, create the bucket with the given capacity to avoid any array copying.
+	// It's worth noticing that the bucket capacity can not exceed even configured LazyBucketCapacityScale to true.
+	LazyBucketCapacityScale bool
+}
+
+// Default fills in the documented default values for unset fields.
+func (c *MemoryConfig) Default() {
+	c.Namespace = strings.TrimSpace(c.Namespace)
+	if c.EntryMaxAge == 0 {
+		c.EntryMaxAge = 15 * time.Minute
+	}
+
+	if c.Buckets == 0 {
+		c.Buckets = 64
+	}
+
+	if c.BucketCapacity == 0 {
+		c.BucketCapacity = 1
+	}
+}
+
+// Validate rejects configurations the underlying cache cannot accept.
+func (c *MemoryConfig) Validate() error {
+	if c.EntryMaxAge < 0 {
+		return errors.New("invalid entry max age: negative")
+	}
+
+	if c.Buckets < 0 {
+		return errors.New("invalid buckets: negative")
+	}
+
+	// Buckets maps to bigcache shards, which must be a power of two
+	// (see the Buckets field doc); fail fast here instead of erroring
+	// later inside bigcache.New.
+	if c.Buckets > 0 && c.Buckets&(c.Buckets-1) != 0 {
+		return errors.New("invalid buckets: not a power of two")
+	}
+
+	if c.BucketCapacity < 0 {
+		return errors.New("invalid bucket capacity: negative")
+	}
+
+	return nil
+}
+
+// NewMemory returns an in-memory Cache implementation.
+func NewMemory(ctx context.Context) (Cache, error) {
+	return NewMemoryWithConfig(ctx, MemoryConfig{})
+}
+
+// MustNewMemory likes NewMemory, but panic if error found.
+func MustNewMemory(ctx context.Context) Cache {
+	n, err := NewMemory(ctx)
+	if err != nil {
+		panic(fmt.Errorf("error creating in-memory cache: %w", err))
+	}
+
+	return n
+}
+
+// NewMemoryWithConfig returns an in-memory Cache implementation with given configuration.
+func NewMemoryWithConfig(ctx context.Context, cfg MemoryConfig) (Cache, error) {
+	// Default, validate.
+	cfg.Default()
+
+	err := cfg.Validate()
+	if err != nil {
+		return nil, err
+	}
+
+	// Generate bigcache configuration with MemoryConfig.
+ // + // For example: + // + // bigcache.Config{ + // LifeWindow: 15 * time.Minute, + // CleanWindow: 3 * time.Minute, + // Shards: 64, + // MaxEntriesInWindow: 64 * 300, // works with MaxEntrySize to determinate the cache initialization. + // MaxEntrySize: 512, + // HardMaxCacheSize: 64, + // StatsEnabled: false, + // Verbose: false, + // } + // + // Each shard initializes with `(MaxEntriesInWindows / Shards) * MaxEntrySize` = 300 * 512 = 150kb. + // Each shard limits in `(HardMaxCacheSize * 1024 * 1024) / Shards` = 64 * 1024 * 1024 / 64 = 1mb. + // Initializes with 64 * 150kb = 9mb, limits with 64 * 1mb = 64mb. + // + capacity := cfg.BucketCapacity * cfg.Buckets + + logger := klog.LoggerWithName(klog.Background(), "cache.memory") + + underlayCfg := bigcache.Config{ + Shards: cfg.Buckets, + LifeWindow: cfg.EntryMaxAge, + CleanWindow: 0, + MaxEntriesInWindow: cfg.Buckets << 4, + MaxEntrySize: cfg.BucketCapacity << (20 - 4), + HardMaxCacheSize: capacity, + StatsEnabled: false, + Verbose: false, + Logger: bigcacheLogger(logger), + OnRemoveWithReason: func(key string, entry []byte, reason bigcache.RemoveReason) { + desc := "unknown" + switch reason { + case bigcache.Deleted: + desc = "deleted" + case bigcache.Expired: + desc = "expired" + case bigcache.NoSpace: + desc = "nospace" + } + + if lg := logger.V(6); lg.Enabled() { + lg.Info(desc, "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + }, + } + if !cfg.LazyEntryEviction { + // Set up a background looping to clean. + underlayCfg.CleanWindow = 3 * time.Minute + } + + if cfg.LazyBucketCapacityScale { + // Initialize the cache queue in 1/4 capacity. + underlayCfg.MaxEntrySize >>= 2 + } + + // Init. + underlay, err := bigcache.New(ctx, underlayCfg) + if err != nil { + return nil, err + } + + mc := memoryCache{ + logger: logger, + underlay: underlay, + namespace: cfg.Namespace, + } + + return mc, nil +} + +// MustNewMemoryWithConfig likes NewMemoryWithConfig, but panic if error found. 
+func MustNewMemoryWithConfig(ctx context.Context, cfg MemoryConfig) Cache { + n, err := NewMemoryWithConfig(ctx, cfg) + if err != nil { + panic(fmt.Errorf("error creating in-memory cache: %w", err)) + } + + return n +} + +// memoryCache adapts Cache interface to implement an in-memory cache with bigcache.BigCache. +type memoryCache struct { + logger klog.Logger + underlay *bigcache.BigCache + namespace string +} + +func (c memoryCache) wrapKey(s *string) *string { + r := path.Join("/", c.namespace, *s) + return &r +} + +func (c memoryCache) Close() error { + return c.underlay.Close() +} + +func (c memoryCache) Name() string { + return "memory" +} + +func (c memoryCache) Set(ctx context.Context, key string, entry []byte) error { + wk := c.wrapKey(&key) + + err := c.underlay.Set(*wk, entry) + if err != nil { + return wrapMemoryError(err) + } + + if lg := c.logger.V(5); lg.Enabled() { + lg.Info("set", + "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + + return nil +} + +func (c memoryCache) Delete(ctx context.Context, key string) ([]byte, error) { + wk := c.wrapKey(&key) + + entry, err := c.underlay.Get(*wk) + if err != nil { + return nil, wrapMemoryError(err) + } + + err = c.underlay.Delete(*wk) + if err != nil { + return nil, wrapMemoryError(err) + } + + if lg := c.logger.V(5); err == nil && lg.Enabled() { + lg.Info("deleted", + "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + + return entry, nil +} + +func (c memoryCache) Get(ctx context.Context, key string) ([]byte, error) { + wk := c.wrapKey(&key) + + entry, err := c.underlay.Get(*wk) + if err != nil { + if errors.Is(err, bigcache.ErrEntryNotFound) { + c.logger.V(5).Info("missed", "key", key) + } + + return nil, wrapMemoryError(err) + } + + if lg := c.logger.V(5); lg.Enabled() { + lg.Info("hit", + "key", key, "size", humanize.IBytes(uint64(len(entry)))) + } + + return entry, nil +} + +func wrapMemoryError(err error) error { + switch { + case err == nil: + return nil + case 
errors.Is(err, bigcache.ErrEntryNotFound):
+		return ErrEntryNotFound
+	// bigcache does not export this error, so match on the message text.
+	case err.Error() == "entry is bigger than max shard size":
+		return ErrEntryTooBig
+	}
+
+	return err
+}
+
+type bigcacheLogger klog.Logger
+
+func (l bigcacheLogger) Printf(format string, args ...any) {
+	klog.Logger(l).Info(fmt.Sprintf(format, args...))
+}
diff --git a/pkg/cache/singlefilght.go b/pkg/cache/singlefilght.go
new file mode 100644
index 0000000..64155e7
--- /dev/null
+++ b/pkg/cache/singlefilght.go
@@ -0,0 +1,56 @@
+package cache
+
+import (
+	"context"
+
+	"golang.org/x/sync/singleflight"
+)
+
+// WithSingleFlight wraps the given Cache so that concurrent identical
+// operations on the same key are collapsed into one underlying call.
+func WithSingleFlight(c Cache) Cache {
+	return &singleFlightCache{Cache: c}
+}
+
+type singleFlightCache struct {
+	sf singleflight.Group
+
+	Cache
+}
+
+func (c *singleFlightCache) Get(ctx context.Context, key string) ([]byte, error) {
+	// Scope the flight key by operation: Get, Delete and Set share one
+	// singleflight.Group, so an unscoped key would let a concurrent Get be
+	// coalesced with a Delete or Set on the same cache key and receive the
+	// wrong operation's result.
+	ch := c.sf.DoChan("get:"+key, func() (any, error) {
+		return c.Cache.Get(ctx, key)
+	})
+
+	select {
+	case r := <-ch:
+		return r.Val.([]byte), r.Err
+	case <-ctx.Done():
+		// The in-flight call keeps running; we only stop waiting for it.
+		return nil, ctx.Err()
+	}
+}
+
+func (c *singleFlightCache) Delete(ctx context.Context, key string) ([]byte, error) {
+	ch := c.sf.DoChan("delete:"+key, func() (any, error) {
+		return c.Cache.Delete(ctx, key)
+	})
+
+	select {
+	case r := <-ch:
+		return r.Val.([]byte), r.Err
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (c *singleFlightCache) Set(ctx context.Context, key string, entry []byte) error {
+	ch := c.sf.DoChan("set:"+key, func() (any, error) {
+		return entry, c.Cache.Set(ctx, key, entry)
+	})
+
+	select {
+	case r := <-ch:
+		return r.Err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
diff --git a/pkg/consts/const.go b/pkg/consts/const.go
new file mode 100644
index 0000000..e390ce1
--- /dev/null
+++ b/pkg/consts/const.go
@@ -0,0 +1,26 @@
+package consts
+
+import (
+	"os"
+	"path/filepath"
+)
+
+const (
+	// SocketDir is the path to expose the socket consumed by KubeCIA.
+	SocketDir = "/var/run"
+)
+
+// SocketPath returns the path to the socket consumed by KubeCIA.
+func SocketPath() string { + return filepath.Clean("/var/run/kubecia.sock") +} + +// DataDir is the path to the data consumed by KubeCIA. +func DataDir() string { + hd, err := os.UserHomeDir() + if err != nil { + hd = "/var/run/kubecia" + } + + return filepath.Clean(filepath.Join(hd, ".kubecia")) +} diff --git a/pkg/json/jsoniter.go b/pkg/json/jsoniter.go new file mode 100644 index 0000000..2d90cbb --- /dev/null +++ b/pkg/json/jsoniter.go @@ -0,0 +1,101 @@ +//go:build jsoniter + +package json + +import ( + stdjson "encoding/json" + "fmt" + "strconv" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +type RawMessage = stdjson.RawMessage + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + Marshal = json.Marshal + Unmarshal = json.Unmarshal + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + +func init() { + // borrowed from https://github.com/json-iterator/go/issues/145#issuecomment-323483602 + decodeNumberAsInt64IfPossible := func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number stdjson.Number + + iter.ReadVal(&number) + i, err := strconv.ParseInt(string(number), 10, 64) + + if err == nil { + *(*any)(ptr) = i + return + } + + f, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*any)(ptr) = f + return + } + default: + *(*any)(ptr) = iter.Read() + } + } + jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible) +} + +// MustMarshal is similar to Marshal, +// but panics if found error. +func MustMarshal(v any) []byte { + bs, err := Marshal(v) + if err != nil { + panic(fmt.Errorf("error marshaling json: %w", err)) + } + + return bs +} + +// MustUnmarshal is similar to Unmarshal, +// but panics if found error. 
+func MustUnmarshal(data []byte, v any) { + err := Unmarshal(data, v) + if err != nil { + panic(fmt.Errorf("error unmarshaling json: %w", err)) + } +} + +// MustMarshalIndent is similar to MarshalIndent, +// but panics if found error. +func MustMarshalIndent(v any, prefix, indent string) []byte { + bs, err := MarshalIndent(v, prefix, indent) + if err != nil { + panic(fmt.Errorf("error marshaling indent json: %w", err)) + } + + return bs +} + +// ShouldMarshal is similar to Marshal, +// but never return error. +func ShouldMarshal(v any) []byte { + bs, _ := Marshal(v) + return bs +} + +// ShouldUnmarshal is similar to Unmarshal, +// but never return error. +func ShouldUnmarshal(data []byte, v any) { + _ = Unmarshal(data, v) +} + +// ShouldMarshalIndent is similar to MarshalIndent, +// but never return error. +func ShouldMarshalIndent(v any, prefix, indent string) []byte { + bs, _ := MarshalIndent(v, prefix, indent) + return bs +} diff --git a/pkg/json/std.go b/pkg/json/std.go new file mode 100644 index 0000000..4605c87 --- /dev/null +++ b/pkg/json/std.go @@ -0,0 +1,69 @@ +//go:build !jsoniter + +package json + +import ( + "encoding/json" + "fmt" +) + +var ( + Marshal = json.Marshal + Unmarshal = json.Unmarshal + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + +type RawMessage = json.RawMessage + +// MustMarshal is similar to Marshal, +// but panics if found error. +func MustMarshal(v any) []byte { + bs, err := Marshal(v) + if err != nil { + panic(fmt.Errorf("error marshaling json: %w", err)) + } + + return bs +} + +// MustUnmarshal is similar to Unmarshal, +// but panics if found error. +func MustUnmarshal(data []byte, v any) { + err := Unmarshal(data, v) + if err != nil { + panic(fmt.Errorf("error unmarshaling json: %w", err)) + } +} + +// MustMarshalIndent is similar to MarshalIndent, +// but panics if found error. 
+func MustMarshalIndent(v any, prefix, indent string) []byte { + bs, err := MarshalIndent(v, prefix, indent) + if err != nil { + panic(fmt.Errorf("error marshaling indent json: %w", err)) + } + + return bs +} + +// ShouldMarshal is similar to Marshal, +// but never return error. +func ShouldMarshal(v any) []byte { + bs, _ := Marshal(v) + return bs +} + +// ShouldUnmarshal is similar to Unmarshal, +// but never return error. +func ShouldUnmarshal(data []byte, v any) { + _ = Unmarshal(data, v) +} + +// ShouldMarshalIndent is similar to MarshalIndent, +// but never return error. +func ShouldMarshalIndent(v any, prefix, indent string) []byte { + bs, _ := MarshalIndent(v, prefix, indent) + return bs +} diff --git a/pkg/plugins/aws/client.go b/pkg/plugins/aws/client.go new file mode 100644 index 0000000..ca0de95 --- /dev/null +++ b/pkg/plugins/aws/client.go @@ -0,0 +1,142 @@ +package aws + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/bytespool" + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/consts" + "github.com/seal-io/kubecia/pkg/token" + "github.com/seal-io/kubecia/pkg/version" +) + +type Client struct { + Socket string + AccessKeyID string + SecretAccessKey string + Region string + Cluster string + AssumeRoleARN string +} + +func (cli *Client) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&cli.Socket, "socket", consts.SocketPath(), "Socket path") + flags.StringVar(&cli.AccessKeyID, "access-key-id", "", "AWS access key ID *") + flags.StringVar(&cli.SecretAccessKey, "secret-access-key", "", "AWS secret access key *") + flags.StringVar(&cli.Region, "region", "", "AWS region *") + flags.StringVar(&cli.Cluster, "cluster", "", "AWS cluster ID or name *") + flags.StringVar(&cli.AssumeRoleARN, "assume-role-arn", "", "AWS assume role ARN") +} + +func (cli *Client) GetToken(ctx context.Context) 
(*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + if si, err := os.Stat(cli.Socket); err == nil && si.Mode()&os.ModeSocket != 0 { + logger.V(6).Info("getting from central service") + + tk, err := cli.GetTokenByHTTP(ctx, apis.Client(cli.Socket)) + if err == nil { + logger.V(6).Info("got from central service") + + return tk, nil + } + + var rce remoteCallError + if !errors.As(err, &rce) { + return nil, err + } + + logger.Error(err, "error getting from central service, try getting locally") + } else { + logger.V(6).Info("getting locally") + } + + tk, err := cli.getToken(ctx) + if err == nil { + logger.V(6).Info("got locally") + + return tk, nil + } + + return nil, fmt.Errorf("error getting token locally: %w", err) +} + +func (cli *Client) GetTokenByHTTP(ctx context.Context, httpc *http.Client) (*token.Token, error) { + url := apis.Route(Namespace, cli.Region, cli.Cluster, cli.AssumeRoleARN) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error creating remote request: %w", err)) + } + + req.SetBasicAuth(cli.AccessKeyID, cli.SecretAccessKey) + + req.Header.Set("User-Agent", version.Get()) + req.Header.Set("X-KubeCIA-DeCapsuled", "true") + + resp, err := httpc.Do(req) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error making remote request: %w", err)) + } + + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response from remote: %s", resp.Status) + } + + buf := bytespool.GetBuffer() + defer bytespool.Put(buf) + + _, err = io.Copy(buf, resp.Body) + if err != nil { + return nil, fmt.Errorf("error copying response body: %w", err) + } + + var tk token.Token + if err = tk.UnmarshalJSON(buf.Bytes()); err != nil { + return nil, fmt.Errorf("error unmarshalling requested token: %w", err) + } + + return &tk, nil +} + +func (cli *Client) getToken(ctx context.Context) 
(*token.Token, error) { + c, err := cache.NewFile(ctx) + if err != nil { + return nil, fmt.Errorf("error creating cache: %w", err) + } + + defer func() { _ = c.Close() }() + + o := TokenOptions{ + AccessKeyID: cli.AccessKeyID, + SecretAccessKey: cli.SecretAccessKey, + Region: cli.Region, + Cluster: cli.Cluster, + AssumeRoleARN: cli.AssumeRoleARN, + } + + return GetToken(ctx, o, c) +} + +func wrapRemoteCallError(err error) error { + return remoteCallError{err: err} +} + +type remoteCallError struct { + err error +} + +func (e remoteCallError) Error() string { + return e.err.Error() +} diff --git a/pkg/plugins/aws/server.go b/pkg/plugins/aws/server.go new file mode 100644 index 0000000..3f6e0d4 --- /dev/null +++ b/pkg/plugins/aws/server.go @@ -0,0 +1,108 @@ +package aws + +import ( + "context" + "net/http" + "strings" + + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/apis/server" +) + +const ( + Namespace = "aws" +) + +func Serve(ctx context.Context, mux *http.ServeMux, opts server.ServeOptions) error { + klog.Infof("serving %[1]s: /%[1]s/{region}/{cluster}[/{assume-role-arn}]\n", Namespace) + + rp := apis.RoutePrefix(Namespace) + hd := http.StripPrefix(rp, &apiServer{ + ServeOptions: opts, + Logger: klog.LoggerWithName(klog.Background(), Namespace), + }) + + mux.Handle(rp, hd) + + return nil +} + +type apiServer struct { + server.ServeOptions + + Logger klog.Logger +} + +func (s *apiServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + c := http.StatusMethodNotAllowed + http.Error(w, http.StatusText(c), c) + + return + } + + var o TokenOptions + + // Authorization: Bearer {accessKeyID:secretAccessKey}. + { + var found bool + + o.AccessKeyID, o.SecretAccessKey, found = r.BasicAuth() + if !found { + c := http.StatusUnauthorized + http.Error(w, http.StatusText(c), c) + + return + } + } + + // Path: {region}/{cluster}[/{assume-role-arn}]. 
+ { + paths := strings.SplitN(r.URL.Path, "/", 3) + if len(paths) < 2 { + c := http.StatusBadRequest + http.Error(w, http.StatusText(c), c) + + return + } + + o.Region = paths[0] + o.Cluster = paths[1] + + if len(paths) == 3 { + o.AssumeRoleARN = paths[2] + } + } + + tk, err := GetToken(r.Context(), o, s.Cache) + if err != nil { + s.Logger.Error(err, "error getting token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + var bs []byte + if r.Header.Get("X-KubeCIA-DeCapsuled") == "true" { + bs, err = tk.MarshalJSON() + } else { + bs, err = tk.ToKubeClientExecCredentialJSON() + } + + if err != nil { + s.Logger.Error(err, "error marshaling token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + w.Header().Set("Content-Type", "application/json") + + _, err = w.Write(bs) + if err != nil { + s.Logger.Error(err, "error writing response") + return + } +} diff --git a/pkg/plugins/aws/token.go b/pkg/plugins/aws/token.go new file mode 100644 index 0000000..e4cc7ca --- /dev/null +++ b/pkg/plugins/aws/token.go @@ -0,0 +1,208 @@ +package aws + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/token" +) + +type TokenOptions struct { + AccessKeyID string + SecretAccessKey string + Region string + Cluster string + AssumeRoleARN string +} + +func (o *TokenOptions) Validate() error { + var requiredTenant bool + + if strings.HasPrefix(o.AccessKeyID, "$") { + o.AccessKeyID = os.ExpandEnv(o.AccessKeyID) + requiredTenant = true + } + + if o.AccessKeyID == "" { + if requiredTenant { + return errors.New("hosted access key ID is required") + } + + return errors.New("access key ID 
is required") + } + + if strings.HasPrefix(o.SecretAccessKey, "$") { + o.SecretAccessKey = os.ExpandEnv(o.SecretAccessKey) + requiredTenant = true + } + + if o.SecretAccessKey == "" { + if requiredTenant { + return errors.New("hosted secret access key is required") + } + + return errors.New("secret access key is required") + } + + if o.Region == "" { + return errors.New("region is required") + } + + if o.Cluster == "" { + return errors.New("cluster ID is required") + } + + if o.AssumeRoleARN == "" && requiredTenant { + return errors.New("assume role ARN is required") + } + + return nil +} + +func (o *TokenOptions) Key() string { + ss := []string{ + Namespace, + o.AccessKeyID, + o.Region, + o.Cluster, + o.AssumeRoleARN, + } + if o.AssumeRoleARN == "" { + ss[len(ss)-1] = "self" + } + + return strings.Join(ss, "_") +} + +// GetToken retrieves a token from cache or remote. +func GetToken(ctx context.Context, opts TokenOptions, cacher cache.Cache) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + err := opts.Validate() + if err != nil { + return nil, fmt.Errorf("invalid options: %w", err) + } + + // Retrieve the token from cache. + ck := opts.Key() + if cacher != nil { + bs, err := cacher.Get(ctx, ck) + if err != nil && !errors.Is(err, cache.ErrEntryNotFound) { + logger.Error(err, "error retrieving token from cache") + } + + if len(bs) != 0 { + var tk token.Token + if err = tk.UnmarshalBinary(bs); err == nil { + if !tk.Expired() { + return &tk, nil + } + } + + if err != nil { + logger.Error(err, "error unmarshalling cached token") + } + } + } + + // Request the token from remote. + tk, err := getToken(ctx, opts) + if err != nil { + return nil, fmt.Errorf("error getting security token: %w", err) + } + + // Save the token into cache. 
+ if cacher != nil { + bs, err := tk.MarshalBinary() + if err != nil { + logger.Error(err, "error marshaling requested token") + } + + if len(bs) != 0 { + err = cacher.Set(ctx, ck, bs) + if err != nil { + logger.Error(err, "error saving token to cache") + } + } + } + + return tk, nil +} + +const ( + requestClusterIDHeader = "x-k8s-aws-id" + requestPresignedParam = 60 + + presignedURLExpiration = 15 * time.Minute + + tokenPrefix = "k8s-aws-v1." +) + +// getToken returns the token, inspired by +// https://github.com/kubernetes-sigs/aws-iam-authenticator/blob/6c197aebdbe1d543f4dff5fee6ae32e71020313b/pkg/token/token.go#L336. +func getToken(ctx context.Context, opts TokenOptions) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + sess, err := session.NewSession( + aws.NewConfig(). + WithCredentials(credentials.NewStaticCredentials(opts.AccessKeyID, opts.SecretAccessKey, "")). + WithLogger(awsLogger(logger.V(5))). + WithRegion(opts.Region). + WithLogLevel(aws.LogDebug), + ) + if err != nil { + return nil, fmt.Errorf("error creating session: %w", err) + } + + api := sts.New(sess) + if opts.AssumeRoleARN != "" { + api = sts.New(sess, + aws.NewConfig(). + WithCredentials(stscreds.NewCredentials(sess, opts.AssumeRoleARN)), + ) + } + + // Generate sts:GetCallerIdentity request and add our custom cluster ID header. + req, _ := api.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) + req.HTTPRequest.Header.Add(requestClusterIDHeader, opts.Cluster) + + // Sign the request. The expires parameter (sets the x-amz-expires header) is + // currently ignored by STS, and the token expires 15 minutes after the x-amz-date + // timestamp regardless. We set it to 60 seconds for backwards compatibility (the + // parameter is a required argument to Presign(), and authenticators 0.3.0 and older are expecting a value between + // 0 and 60 on the server side). 
+ // https://github.com/aws/aws-sdk-go/issues/2167 + req.SetContext(ctx) + + presignedURLString, err := req.Presign(requestPresignedParam) + if err != nil { + return nil, err + } + + // Set token expiration to 1 minute before the presigned URL expires for some cushion. + tk := &token.Token{ + Expiration: time.Now().Local().Add(presignedURLExpiration - 1*time.Minute), + Value: tokenPrefix + base64.RawURLEncoding.EncodeToString([]byte(presignedURLString)), + } + + return tk, nil +} + +type awsLogger klog.Logger + +func (l awsLogger) Log(args ...any) { + klog.Logger(l).Info(fmt.Sprint(args...)) +} diff --git a/pkg/plugins/azure/client.go b/pkg/plugins/azure/client.go new file mode 100644 index 0000000..123ab88 --- /dev/null +++ b/pkg/plugins/azure/client.go @@ -0,0 +1,139 @@ +package azure + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/bytespool" + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/consts" + "github.com/seal-io/kubecia/pkg/token" + "github.com/seal-io/kubecia/pkg/version" +) + +type Client struct { + Socket string + ClientID string + ClientSecret string + Tenant string + Resource string +} + +func (cli *Client) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&cli.Socket, "socket", consts.SocketPath(), "Socket path") + flags.StringVar(&cli.ClientID, "client-id", "", "Azure client ID *") + flags.StringVar(&cli.ClientSecret, "client-secret", "", "Azure client secret *") + flags.StringVar(&cli.Tenant, "tenant", "", "Azure tenant (ID) *") + flags.StringVar(&cli.Resource, "resource", "", "Azure resource (ID) *") +} + +func (cli *Client) GetToken(ctx context.Context) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + if si, err := os.Stat(cli.Socket); err == nil && si.Mode()&os.ModeSocket != 0 { + logger.V(6).Info("getting from central service") + + 
tk, err := cli.GetTokenByHTTP(ctx, apis.Client(cli.Socket)) + if err == nil { + logger.V(6).Info("got from central service") + + return tk, nil + } + + var rce remoteCallError + if !errors.As(err, &rce) { + return nil, err + } + + logger.Error(err, "error getting from central service, try getting locally") + } else { + logger.V(6).Info("getting locally") + } + + tk, err := cli.getToken(ctx) + if err == nil { + logger.V(6).Info("got locally") + + return tk, nil + } + + return nil, fmt.Errorf("error getting token locally: %w", err) +} + +func (cli *Client) GetTokenByHTTP(ctx context.Context, httpc *http.Client) (*token.Token, error) { + url := apis.Route(Namespace, cli.Tenant, cli.Resource) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error creating remote request: %w", err)) + } + + req.SetBasicAuth(cli.ClientID, cli.ClientSecret) + + req.Header.Set("User-Agent", version.Get()) + req.Header.Set("X-KubeCIA-DeCapsuled", "true") + + resp, err := httpc.Do(req) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error making remote request: %w", err)) + } + + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response from remote: %s", resp.Status) + } + + buf := bytespool.GetBuffer() + defer bytespool.Put(buf) + + _, err = io.Copy(buf, resp.Body) + if err != nil { + return nil, fmt.Errorf("error copying response body: %w", err) + } + + var tk token.Token + if err = tk.UnmarshalJSON(buf.Bytes()); err != nil { + return nil, fmt.Errorf("error unmarshalling requested token: %w", err) + } + + return &tk, nil +} + +func (cli *Client) getToken(ctx context.Context) (*token.Token, error) { + c, err := cache.NewFile(ctx) + if err != nil { + return nil, fmt.Errorf("error creating cache: %w", err) + } + + defer func() { _ = c.Close() }() + + o := TokenOptions{ + ClientID: cli.ClientID, + ClientSecret: 
cli.ClientSecret, + Tenant: cli.Tenant, + Resource: cli.Resource, + } + + return GetToken(ctx, o, c) +} + +func wrapRemoteCallError(err error) error { + return remoteCallError{err: err} +} + +type remoteCallError struct { + err error +} + +func (e remoteCallError) Error() string { + return e.err.Error() +} diff --git a/pkg/plugins/azure/server.go b/pkg/plugins/azure/server.go new file mode 100644 index 0000000..0ce8c50 --- /dev/null +++ b/pkg/plugins/azure/server.go @@ -0,0 +1,104 @@ +package azure + +import ( + "context" + "net/http" + "strings" + + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/apis/server" +) + +const ( + Namespace = "azure" +) + +func Serve(ctx context.Context, mux *http.ServeMux, opts server.ServeOptions) error { + klog.Infof("serving %[1]s: /%[1]s/{tenant}/{resource}\n", Namespace) + + rp := apis.RoutePrefix(Namespace) + hd := http.StripPrefix(rp, &apiServer{ + ServeOptions: opts, + Logger: klog.LoggerWithName(klog.Background(), Namespace), + }) + + mux.Handle(rp, hd) + + return nil +} + +type apiServer struct { + server.ServeOptions + + Logger klog.Logger +} + +func (s *apiServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + c := http.StatusMethodNotAllowed + http.Error(w, http.StatusText(c), c) + + return + } + + var o TokenOptions + + // Authorization: Bearer {clientID:clientSecret}. + { + var found bool + + o.ClientID, o.ClientSecret, found = r.BasicAuth() + if !found { + c := http.StatusUnauthorized + http.Error(w, http.StatusText(c), c) + + return + } + } + + // Path: {tenant}/{resource}. 
+ { + paths := strings.SplitN(r.URL.Path, "/", 2) + if len(paths) < 2 { + c := http.StatusBadRequest + http.Error(w, http.StatusText(c), c) + + return + } + + o.Tenant = paths[0] + o.Resource = paths[1] + } + + tk, err := GetToken(r.Context(), o, s.Cache) + if err != nil { + s.Logger.Error(err, "error getting token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + var bs []byte + if r.Header.Get("X-KubeCIA-DeCapsuled") == "true" { + bs, err = tk.MarshalJSON() + } else { + bs, err = tk.ToKubeClientExecCredentialJSON() + } + + if err != nil { + s.Logger.Error(err, "error marshaling token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + w.Header().Set("Content-Type", "application/json") + + _, err = w.Write(bs) + if err != nil { + s.Logger.Error(err, "error writing response") + return + } +} diff --git a/pkg/plugins/azure/token.go b/pkg/plugins/azure/token.go new file mode 100644 index 0000000..e89b565 --- /dev/null +++ b/pkg/plugins/azure/token.go @@ -0,0 +1,173 @@ +package azure + +import ( + "context" + "errors" + "fmt" + "os" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "k8s.io/klog/v2" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/token" +) + +func init() { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + log.SetListener(func(event log.Event, msg string) { + logger.V(5).Info(msg, "event", event) + }) +} + +type TokenOptions struct { + ClientID string + ClientSecret string + Tenant string + Resource string +} + +func (o *TokenOptions) Validate() error { + var requiredTenant bool + + if strings.HasPrefix(o.ClientID, "$") { + o.ClientID = os.ExpandEnv(o.ClientID) + requiredTenant = true + } + + if o.ClientID == "" { + if requiredTenant { + return errors.New("hosted client ID is required") + } + + return 
errors.New("client ID is required") + } + + if strings.HasPrefix(o.ClientSecret, "$") { + o.ClientSecret = os.ExpandEnv(o.ClientSecret) + requiredTenant = true + } + + if o.ClientSecret == "" { + if requiredTenant { + return errors.New("hosted client secret is required") + } + + return errors.New("client secret is required") + } + + if o.Tenant == "" { + return errors.New("tenant is required") + } + + if o.Resource == "" { + return errors.New("resource is required") + } + + if match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", o.Resource); err != nil || !match { + return errors.New("resource ID must be alphanumeric and contain only '.', ':', '-', and '/' characters") + } + + return nil + } + + func (o *TokenOptions) Key() string { + ss := []string{ + Namespace, + o.ClientID, + o.Tenant, + o.Resource, + } + + return strings.Join(ss, "_") + } + + // GetToken retrieves a token from cache or remote. + func GetToken(ctx context.Context, opts TokenOptions, cacher cache.Cache) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + err := opts.Validate() + if err != nil { + return nil, fmt.Errorf("invalid options: %w", err) + } + + // Retrieve the token from cache. + ck := opts.Key() + if cacher != nil { + bs, err := cacher.Get(ctx, ck) + if err != nil && !errors.Is(err, cache.ErrEntryNotFound) { + logger.Error(err, "error retrieving token from cache") + } + + if len(bs) != 0 { + var tk token.Token + if err = tk.UnmarshalBinary(bs); err == nil { + if !tk.Expired() { + return &tk, nil + } + } + + if err != nil { + logger.Error(err, "error unmarshalling cached token") + } + } + } + + // Request the token from remote. + tk, err := getToken(ctx, opts) + if err != nil { + return nil, fmt.Errorf("error getting credential token: %w", err) + } + + // Save the token into cache. 
+ if cacher != nil { + bs, err := tk.MarshalBinary() + if err != nil { + logger.Error(err, "error marshaling requested token") + } + + if len(bs) != 0 { + err = cacher.Set(ctx, ck, bs) + if err != nil { + logger.Error(err, "error saving token to cache") + } + } + } + + return tk, nil +} + +// getToken returns the token, inspired by +// https://github.com/Azure/kubelogin/blob/2b43d04d1a57229d67970bf0741c4433faf52f98/pkg/internal/token/azurecli.go#L43. +func getToken(ctx context.Context, opts TokenOptions) (*token.Token, error) { + api, err := azidentity.NewClientSecretCredential( + opts.Tenant, opts.ClientID, opts.ClientSecret, + &azidentity.ClientSecretCredentialOptions{ + AdditionallyAllowedTenants: []string{"*"}, + }) + if err != nil { + return nil, fmt.Errorf("error creating azure client: %w", err) + } + + ak, err := api.GetToken(ctx, policy.TokenRequestOptions{Scopes: []string{opts.Resource}}) + if err != nil { + return nil, fmt.Errorf("error getting token: %w", err) + } + + if ak.Token == "" { + return nil, errors.New("no token found") + } + + tk := &token.Token{ + Expiration: ak.ExpiresOn, + Value: ak.Token, + } + + return tk, nil +} diff --git a/pkg/plugins/gcp/client.go b/pkg/plugins/gcp/client.go new file mode 100644 index 0000000..11248e6 --- /dev/null +++ b/pkg/plugins/gcp/client.go @@ -0,0 +1,139 @@ +package gcp + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/bytespool" + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/consts" + "github.com/seal-io/kubecia/pkg/token" + "github.com/seal-io/kubecia/pkg/version" +) + +type Client struct { + Socket string + ClientID string + ClientSecret string + Region string + Cluster string +} + +func (cli *Client) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&cli.Socket, "socket", consts.SocketPath(), "Socket path") + 
flags.StringVar(&cli.ClientID, "client-id", "", "GCP client ID *") + flags.StringVar(&cli.ClientSecret, "client-secret", "", "GCP client secret *") + flags.StringVar(&cli.Region, "region", "", "GCP region *") + flags.StringVar(&cli.Cluster, "cluster", "", "GCP cluster ID or name *") +} + +func (cli *Client) GetToken(ctx context.Context) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + if si, err := os.Stat(cli.Socket); err == nil && si.Mode()&os.ModeSocket != 0 { + logger.V(6).Info("getting from central service") + + tk, err := cli.GetTokenByHTTP(ctx, apis.Client(cli.Socket)) + if err == nil { + logger.V(6).Info("got from central service") + + return tk, nil + } + + var rce remoteCallError + if !errors.As(err, &rce) { + return nil, err + } + + logger.Error(err, "error getting from central service, try getting locally") + } else { + logger.V(6).Info("getting locally") + } + + tk, err := cli.getToken(ctx) + if err == nil { + logger.V(6).Info("got locally") + + return tk, nil + } + + return nil, fmt.Errorf("error getting token locally: %w", err) +} + +func (cli *Client) GetTokenByHTTP(ctx context.Context, httpc *http.Client) (*token.Token, error) { + url := apis.Route(Namespace, cli.Region, cli.Cluster) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error creating remote request: %w", err)) + } + + req.SetBasicAuth(cli.ClientID, cli.ClientSecret) + + req.Header.Set("User-Agent", version.Get()) + req.Header.Set("X-KubeCIA-DeCapsuled", "true") + + resp, err := httpc.Do(req) + if err != nil { + return nil, wrapRemoteCallError(fmt.Errorf("error making remote request: %w", err)) + } + + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response from remote: %s", resp.Status) + } + + buf := bytespool.GetBuffer() + defer bytespool.Put(buf) + + _, err = io.Copy(buf, resp.Body) + if 
err != nil { + return nil, fmt.Errorf("error copying response body: %w", err) + } + + var tk token.Token + if err = tk.UnmarshalJSON(buf.Bytes()); err != nil { + return nil, fmt.Errorf("error unmarshalling requested token: %w", err) + } + + return &tk, nil +} + +func (cli *Client) getToken(ctx context.Context) (*token.Token, error) { + c, err := cache.NewFile(ctx) + if err != nil { + return nil, fmt.Errorf("error creating cache: %w", err) + } + + defer func() { _ = c.Close() }() + + o := TokenOptions{ + ClientID: cli.ClientID, + ClientSecret: cli.ClientSecret, + Region: cli.Region, + Cluster: cli.Cluster, + } + + return GetToken(ctx, o, c) +} + +func wrapRemoteCallError(err error) error { + return remoteCallError{err: err} +} + +type remoteCallError struct { + err error +} + +func (e remoteCallError) Error() string { + return e.err.Error() +} diff --git a/pkg/plugins/gcp/server.go b/pkg/plugins/gcp/server.go new file mode 100644 index 0000000..cb83025 --- /dev/null +++ b/pkg/plugins/gcp/server.go @@ -0,0 +1,104 @@ +package gcp + +import ( + "context" + "net/http" + "strings" + + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/apis" + "github.com/seal-io/kubecia/pkg/apis/server" +) + +const ( + Namespace = "gcp" +) + +func Serve(ctx context.Context, mux *http.ServeMux, opts server.ServeOptions) error { + klog.Infof("serving %[1]s: /%[1]s/{region}/{cluster}\n", Namespace) + + rp := apis.RoutePrefix(Namespace) + hd := http.StripPrefix(rp, &apiServer{ + ServeOptions: opts, + Logger: klog.LoggerWithName(klog.Background(), Namespace), + }) + + mux.Handle(rp, hd) + + return nil + } + + type apiServer struct { + server.ServeOptions + + Logger klog.Logger + } + + func (s *apiServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + c := http.StatusMethodNotAllowed + http.Error(w, http.StatusText(c), c) + + return + } + + var o TokenOptions + + // Authorization: Basic {clientID:clientSecret}. 
+ { + var found bool + + o.ClientID, o.ClientSecret, found = r.BasicAuth() + if !found { + c := http.StatusUnauthorized + http.Error(w, http.StatusText(c), c) + + return + } + } + + // Path: {region}/{cluster}. + { + paths := strings.SplitN(r.URL.Path, "/", 2) + if len(paths) < 2 { + c := http.StatusBadRequest + http.Error(w, http.StatusText(c), c) + + return + } + + o.Region = paths[0] + o.Cluster = paths[1] + } + + tk, err := GetToken(r.Context(), o, s.Cache) + if err != nil { + s.Logger.Error(err, "error getting token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + var bs []byte + if r.Header.Get("X-KubeCIA-DeCapsuled") == "true" { + bs, err = tk.MarshalJSON() + } else { + bs, err = tk.ToKubeClientExecCredentialJSON() + } + + if err != nil { + s.Logger.Error(err, "error marshaling token") + http.Error(w, err.Error(), http.StatusInternalServerError) + + return + } + + w.Header().Set("Content-Type", "application/json") + + _, err = w.Write(bs) + if err != nil { + s.Logger.Error(err, "error writing response") + return + } +} diff --git a/pkg/plugins/gcp/token.go b/pkg/plugins/gcp/token.go new file mode 100644 index 0000000..45b7116 --- /dev/null +++ b/pkg/plugins/gcp/token.go @@ -0,0 +1,160 @@ +package gcp + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "k8s.io/klog/v2" + + "github.com/seal-io/kubecia/pkg/cache" + "github.com/seal-io/kubecia/pkg/token" +) + +type TokenOptions struct { + ClientID string + ClientSecret string + Region string + Cluster string +} + +func (o *TokenOptions) Validate() error { + var requiredTenant bool + + if strings.HasPrefix(o.ClientID, "$") { + o.ClientID = os.ExpandEnv(o.ClientID) + requiredTenant = true + } + + if o.ClientID == "" { + if requiredTenant { + return errors.New("hosted client ID is required") + } + + return errors.New("client ID is required") + } + + if strings.HasPrefix(o.ClientSecret, "$") { + o.ClientSecret = 
os.ExpandEnv(o.ClientSecret) + requiredTenant = true + } + + if o.ClientSecret == "" { + if requiredTenant { + return errors.New("hosted client secret is required") + } + + return errors.New("client secret is required") + } + + if o.Region == "" { + return errors.New("region is required") + } + + if o.Cluster == "" { + return errors.New("cluster is required") + } + + return nil +} + +func (o *TokenOptions) Key() string { + ss := []string{ + Namespace, + o.ClientID, + o.Region, + o.Cluster, + } + + return strings.Join(ss, "_") +} + +func GetToken(ctx context.Context, opts TokenOptions, cacher cache.Cache) (*token.Token, error) { + logger := klog.LoggerWithName(klog.Background(), Namespace) + + err := opts.Validate() + if err != nil { + return nil, fmt.Errorf("invalid options: %w", err) + } + + // Retrieve the token from cache. + ck := opts.Key() + if cacher != nil { + bs, err := cacher.Get(ctx, ck) + if err != nil && !errors.Is(err, cache.ErrEntryNotFound) { + logger.Error(err, "error retrieving token from cache") + } + + if len(bs) != 0 { + var tk token.Token + if err = tk.UnmarshalBinary(bs); err == nil { + if !tk.Expired() { + return &tk, nil + } + } + + if err != nil { + logger.Error(err, "error unmarshalling cached token") + } + } + } + + // Request the token from remote. + tk, err := getToken(ctx, opts) + if err != nil { + return nil, fmt.Errorf("error getting credential token: %w", err) + } + + // Save the token into cache. + if cacher != nil { + bs, err := tk.MarshalBinary() + if err != nil { + logger.Error(err, "error marshaling requested token") + } + + if len(bs) != 0 { + err = cacher.Set(ctx, ck, bs) + if err != nil { + logger.Error(err, "error saving token to cache") + } + } + } + + return tk, nil +} + +// getToken returns the token, inspired by +// https://github.com/kubernetes/client-go/blob/v0.22.17/plugin/pkg/client/auth/gcp/gcp.go. 
+func getToken(ctx context.Context, opts TokenOptions) (*token.Token, error) { + apiCfg := &oauth2.Config{ + ClientID: opts.ClientID, + ClientSecret: opts.ClientSecret, + Scopes: []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + }, + Endpoint: oauth2.Endpoint{ + AuthURL: google.Endpoint.AuthURL, + TokenURL: google.Endpoint.TokenURL, + AuthStyle: oauth2.AuthStyleInHeader, + }, + } + + api := apiCfg.TokenSource(ctx, &oauth2.Token{}) + + ak, err := api.Token() + if err != nil { + return nil, fmt.Errorf("error getting token: %w", err) + } + + tk := &token.Token{ + Expiration: ak.Expiry, + Value: ak.AccessToken, + } + + return tk, nil +} diff --git a/pkg/signal/context.go b/pkg/signal/context.go new file mode 100644 index 0000000..70ef1f2 --- /dev/null +++ b/pkg/signal/context.go @@ -0,0 +1,42 @@ +package signal + +import ( + "context" + "errors" + "os" + "os/signal" + + "k8s.io/klog/v2" +) + +var registered = make(chan struct{}) + +// Context registers for signals and returns a context. +func Context() context.Context { + close(registered) // Panics when called twice. + + ctx, cancel := context.WithCancelCause(context.Background()) + + // Register for signals. + ch := make(chan os.Signal, len(shutdownSignals)) + signal.Notify(ch, shutdownSignals...) + + // Process signals. 
+ go func() { + var shutdown bool + + for s := range ch { + klog.V(4).Infof("received signal %q", s) + + if shutdown { + os.Exit(1) + } + + klog.V(4).Info("exiting") + cancel(errors.New("received shutdown signal")) + shutdown = true + } + }() + + return ctx +} diff --git a/pkg/signal/signal_posix.go b/pkg/signal/signal_posix.go new file mode 100644 index 0000000..7ff67e0 --- /dev/null +++ b/pkg/signal/signal_posix.go @@ -0,0 +1,13 @@ +//go:build !windows + +package signal + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{ + syscall.SIGINT, + syscall.SIGTERM, +} diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go new file mode 100644 index 0000000..c248a32 --- /dev/null +++ b/pkg/signal/signal_windows.go @@ -0,0 +1,9 @@ +package signal + +import ( + "os" +) + +var shutdownSignals = []os.Signal{ + os.Interrupt, +} diff --git a/pkg/token/token.go b/pkg/token/token.go new file mode 100644 index 0000000..d706ad1 --- /dev/null +++ b/pkg/token/token.go @@ -0,0 +1,91 @@ +package token + +import ( + "bytes" + "encoding/gob" + "time" + + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + clientauth "k8s.io/client-go/pkg/apis/clientauthentication/v1" + "k8s.io/utils/ptr" + + "github.com/seal-io/kubecia/pkg/json" +) + +func init() { + gob.Register(_Token{}) +} + +type ( + Token struct { + Expiration time.Time `json:"expiration,omitempty"` + Value string `json:"value"` + } + + // _Token alias Token, see https://github.com/golang/go/issues/32251. 
+ _Token Token +) + +func (t *Token) Expired() bool { + if t.Expiration.IsZero() { + return true + } + + return t.Expiration.Before(time.Now()) +} + +func (t *Token) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + + err := gob.NewEncoder(&buf).Encode(_Token(*t)) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (t *Token) UnmarshalBinary(b []byte) error { + var _t _Token + + err := gob.NewDecoder(bytes.NewReader(b)).Decode(&_t) + if err != nil { + return err + } + + *t = Token(_t) + + return nil +} + +func (t *Token) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + + err := json.NewEncoder(&buf).Encode(*t) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (t *Token) UnmarshalJSON(b []byte) error { + return json.NewDecoder(bytes.NewReader(b)).Decode(t) +} + +func (t *Token) ToKubeClientExecCredential() clientauth.ExecCredential { + return clientauth.ExecCredential{ + TypeMeta: meta.TypeMeta{ + APIVersion: clientauth.SchemeGroupVersion.String(), + Kind: "ExecCredential", + }, + Status: &clientauth.ExecCredentialStatus{ + ExpirationTimestamp: ptr.To(meta.NewTime(t.Expiration)), + Token: t.Value, + }, + } +} + +func (t *Token) ToKubeClientExecCredentialJSON() ([]byte, error) { + return json.Marshal(t.ToKubeClientExecCredential()) +} diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 0000000..b995d8e --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,75 @@ +package version + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + "golang.org/x/mod/semver" +) + +var ( + Version = "dev" + GitCommit = "HEAD" +) + +func Get() string { + return fmt.Sprintf("%s (%s)", Version, GitCommit) +} + +func GetUserAgent() string { + return GetUserAgentWith("kubecia") +} + +func GetUserAgentWith(name string) string { + return "seal.io/" + name + "; version=" + Get() + "; os=" + runtime.GOOS + "; arch=" + runtime.GOARCH +} + +func Major() string { + vX := 
semver.Major(Version) + if vX == "" { + return Version + } + + return vX +} + +func MajorMinor() string { + vXy := semver.MajorMinor(Version) + if vXy == "" { + return Version + } + + return vXy +} + +func Previous() string { + vXy := MajorMinor() + if vXy == Version { + return Version + } + + v := strings.Split(vXy, ".") + if v[1] != "0" { + y, _ := strconv.ParseInt(v[1], 10, 64) + y-- + + if y >= 0 { + return v[0] + "." + strconv.FormatInt(y, 10) + } + } + + x, _ := strconv.ParseInt(v[0][1:], 10, 64) + x-- + + if x < 0 { + return Version + } + + return "v" + strconv.FormatInt(x, 10) +} + +func IsValid() bool { + return semver.IsValid(Version) +}