diff --git a/go.mod b/go.mod
index 525699340..b996b5045 100644
--- a/go.mod
+++ b/go.mod
@@ -5,77 +5,78 @@ go 1.22.0
toolchain go1.22.2

require (
+ github.com/go-resty/resty/v2 v2.13.1
github.com/gogo/protobuf v1.3.2
github.com/grafana-tools/sdk v0.0.0-20220919052116-6562121319fc
- github.com/onsi/ginkgo/v2 v2.15.0
- github.com/onsi/gomega v1.32.0
+ github.com/onsi/ginkgo/v2 v2.19.0
+ github.com/onsi/gomega v1.33.1
github.com/pkg/errors v0.9.1
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.71.2
- github.com/spf13/cobra v1.8.0
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1
+ github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/zeebo/xxh3 v1.0.2
- go.bytebuilders.dev/license-verifier v0.14.0
+ go.bytebuilders.dev/license-verifier v0.14.1
go.openviz.dev/apimachinery v0.0.7
go.openviz.dev/grafana-sdk v0.0.5
- golang.org/x/text v0.15.0
+ golang.org/x/text v0.16.0
gomodules.xyz/logs v0.0.7
gomodules.xyz/pointer v0.1.0
gomodules.xyz/runtime v0.3.0
gomodules.xyz/wait v0.2.0
gomodules.xyz/x v0.0.17
- k8s.io/api v0.29.2
- k8s.io/apiextensions-apiserver v0.29.2
- k8s.io/apimachinery v0.29.2
- k8s.io/apiserver v0.29.2
- k8s.io/client-go v0.29.2
- k8s.io/klog/v2 v2.120.1
- kmodules.xyz/authorizer v0.29.0
- kmodules.xyz/client-go v0.29.14
- kmodules.xyz/custom-resources v0.29.1
+ k8s.io/api v0.30.2
+ k8s.io/apiextensions-apiserver v0.30.2
+ k8s.io/apimachinery v0.30.2
+ k8s.io/apiserver v0.30.2
+ k8s.io/client-go v0.30.2
+ k8s.io/klog/v2 v2.130.1
+ kmodules.xyz/authorizer v0.29.1
+ kmodules.xyz/client-go v0.30.9
+ kmodules.xyz/custom-resources v0.30.0
kmodules.xyz/monitoring-agent-api v0.29.0
- sigs.k8s.io/controller-runtime v0.17.4
+ sigs.k8s.io/controller-runtime v0.18.4
x-helm.dev/apimachinery v0.0.16
)

require (
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.2.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
- github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
+ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.8.0 // indirect
+ github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
- github.com/go-resty/resty/v2 v2.13.0 // indirect
- github.com/go-sql-driver/mysql v1.7.1 // indirect
- github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/go-sql-driver/mysql v1.8.1 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/google/cel-go v0.17.7 // indirect
+ github.com/google/btree v1.0.1 // indirect
+ github.com/google/cel-go v0.17.8 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-containerregistry v0.19.1 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect
+ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gosimple/slug v1.1.1 // indirect
@@ -84,7 +85,6 @@ require (
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
@@ -92,7 +92,7 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
- github.com/moby/term v0.5.0 // indirect
+ github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
@@ -125,15 +125,15 @@ require (
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
- golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/net v0.25.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
- golang.org/x/sync v0.6.0 // indirect
- golang.org/x/sys v0.20.0 // indirect
- golang.org/x/term v0.20.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/term v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.18.0 // indirect
+ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f // indirect
gomodules.xyz/flags v0.1.3 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
@@ -150,14 +150,14 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/cli-runtime v0.29.2 // indirect
- k8s.io/component-base v0.29.2 // indirect
+ k8s.io/cli-runtime v0.30.1 // indirect
+ k8s.io/component-base v0.30.2 // indirect
k8s.io/component-helpers v0.29.0 // indirect
- k8s.io/kms v0.29.2 // indirect
- k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect
- k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
+ k8s.io/kms v0.30.2 // indirect
+ k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect
+ k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
kmodules.xyz/apiversion v0.2.0 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
@@ -167,10 +167,8 @@ require (
replace github.com/Masterminds/sprig/v3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8
-replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.17.5-0.20240506165850-9bc6cbeccc8a
+replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.6
-replace k8s.io/apiserver => github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573
-
-replace k8s.io/kubernetes => github.com/kmodules/kubernetes v1.30.0-alpha.0.0.20231224075822-3bd9a13c86db
+replace k8s.io/apiserver => github.com/kmodules/apiserver v0.30.3-0.20240710215914-588ed1f51176
diff --git a/go.sum b/go.sum
index 435516f80..aaa35d8c9 100644
--- a/go.sum
+++ b/go.sum
@@ -16,6 +16,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -34,8 +36,8 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/g
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -51,10 +53,12 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20210526005521-9e51b9051fd0/go.mod h1:At5TxYYdxkbQL0TSefRjhLE3Q0lgvqKKMSFUglJ7i1U=
-github.com/chromedp/cdproto v0.0.0-20210706234513-2bc298e8be7f h1:lg5k1KAxmknil6Z19LaaeiEs5Pje7hPzRfyWSSnWLP0=
github.com/chromedp/cdproto v0.0.0-20210706234513-2bc298e8be7f/go.mod h1:At5TxYYdxkbQL0TSefRjhLE3Q0lgvqKKMSFUglJ7i1U=
-github.com/chromedp/chromedp v0.7.3 h1:FvgJICfjvXtDX+miuMUY0NHuY8zQvjS/TcEQEG6Ldzs=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89 h1:aPflPkRFkVwbW6dmcVqfgwp1i+UWGFH6VgR1Jim5Ygc=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.7.3/go.mod h1:9gC521Yzgrk078Ulv6KIgG7hJ2x9aWrxMBBobTFk30A=
+github.com/chromedp/chromedp v0.9.2 h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -74,8 +78,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -95,8 +99,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBF
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
-github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -115,8 +119,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -129,19 +133,20 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-resty/resty/v2 v2.13.0 h1:joaL6wxSgm1OZal4FAAyddkL1T4uo5NxHYFkGmUusqE=
-github.com/go-resty/resty/v2 v2.13.0/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
-github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
-github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.1.0-rc.5 h1:QOAag7FoBaBYYHRqzqkhhd8fq5RTubvI4v3Ft/gDVVQ=
github.com/gobwas/ws v1.1.0-rc.5/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0=
+github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -173,10 +178,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
-github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
+github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -197,8 +202,8 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
-github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -252,8 +257,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
-github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -267,10 +272,10 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573 h1:6v7bTFGH/Ha1idq1sLX9px2KJhcx6cpuMowuYRyCht4=
-github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM=
-github.com/kmodules/controller-runtime v0.17.5-0.20240506165850-9bc6cbeccc8a h1:u8HKIdgkhL3sN8Xie0+/JyoLbPCLkG8SHxtEpU1Meo0=
-github.com/kmodules/controller-runtime v0.17.5-0.20240506165850-9bc6cbeccc8a/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
+github.com/kmodules/apiserver v0.30.3-0.20240710215914-588ed1f51176 h1:STr/uw1doUoVuEwV4DKhgn7ImBKPHRwhX3rlktbiGiA=
+github.com/kmodules/apiserver v0.30.3-0.20240710215914-588ed1f51176/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
+github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe h1:6nl5dIci8FTzM2hxZ89ufxTXUYqLr9kSGEPPwX87ryk=
+github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -306,8 +311,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -324,10 +329,10 @@ github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JX
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -342,8 +347,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.71.2 h1:HZdPRm0ApWPg7F4sHgbqWkL+ddWfpTZsopm5HM/2g4o=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.71.2/go.mod h1:3RiUkFmR9kmPZi9r/8a5jw0a9yg+LMmr7qa0wjqvSiI=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1 h1:+iiljhJV6niK7MuifJs/n3NeLxikd85nrQfn53sLJkU=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
@@ -389,8 +394,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -407,7 +412,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -435,8 +439,8 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
-go.bytebuilders.dev/license-verifier v0.14.0 h1:O6pXhz9vz7dPWIJATkX+JiMLhUD2ydzvKzf26c+3Jrw=
-go.bytebuilders.dev/license-verifier v0.14.0/go.mod h1:GB9XTSQUcllJ4AVq29TdJI6yRjoI86HGz0XMqq9nLwY=
+go.bytebuilders.dev/license-verifier v0.14.1 h1:Pk0a4NKgRjMt6eBZATHwLmPUuTQL00kQB3AQoxhCsUE=
+go.bytebuilders.dev/license-verifier v0.14.1/go.mod h1:GB9XTSQUcllJ4AVq29TdJI6yRjoI86HGz0XMqq9nLwY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
@@ -498,8 +502,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -545,8 +550,9 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -561,8 +567,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -589,16 +595,18 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -608,8 +616,9 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -637,8 +646,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -743,44 +752,44 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
-k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
-k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg=
-k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8=
-k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
-k8s.io/cli-runtime v0.29.2 h1:smfsOcT4QujeghsNjECKN3lwyX9AwcFU0nvJ7sFN3ro=
-k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8=
-k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
-k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
-k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8=
-k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM=
+k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
+k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
+k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
+k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
+k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
+k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/cli-runtime v0.30.1 h1:kSBBpfrJGS6lllc24KeniI9JN7ckOOJKnmFYH1RpTOw=
+k8s.io/cli-runtime v0.30.1/go.mod h1:zhHgbqI4J00pxb6gM3gJPVf2ysDjhQmQtnTxnMScab8=
+k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
+k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
+k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=
k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y=
-k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo=
-k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks=
-k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kms v0.30.2 h1:VSZILO/tkzrz5Tu2j+yFQZ2Dc5JerQZX2GqhFJbQrfw=
+k8s.io/kms v0.30.2/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
+k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA=
+k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
+k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
+k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kmodules.xyz/apiversion v0.2.0 h1:vAQYqZFm4xu4pbB1cAdHbFEPES6EQkcR4wc06xdTOWk=
kmodules.xyz/apiversion v0.2.0/go.mod h1:oPX8g8LvlPdPX3Yc5YvCzJHQnw3YF/X4/jdW0b1am80=
-kmodules.xyz/authorizer v0.29.0 h1:ND8YGeyzExdZ8Bq5Z6UdFO794I6+oPuXbUMWyjlsYgM=
-kmodules.xyz/authorizer v0.29.0/go.mod h1:UQmE3sNXeliebUqjEeD9QYiY+Na27/C5Bg/ekVRfQ3U=
-kmodules.xyz/client-go v0.29.14 h1:h3xkGUZlDwNQkjPbowXm1uJwHxXHoQhvWcR6hVH3FIY=
-kmodules.xyz/client-go v0.29.14/go.mod h1:6Ezo5qTW+Rjd4KBV8XnDRRaiVHDINMvDioJjiDsXmi0=
+kmodules.xyz/authorizer v0.29.1 h1:uByGGoryKbZcfiEAhjcK/Y345I9mygNQP7DVpkMbNQQ=
+kmodules.xyz/authorizer v0.29.1/go.mod h1:kZRhclL8twzyt2bQuJQJbpYww2sc+qFr8I5PPoq/sWY=
+kmodules.xyz/client-go v0.30.9 h1:wiLivFlqVZOitAqLFEa1n53GkbYYOmiR8upjIHPHrYk=
+kmodules.xyz/client-go v0.30.9/go.mod h1:XL3PDQIXG4s3xNRL2SSxIvi8b2WyMGpn26dFnOBz0j4=
kmodules.xyz/crd-schema-fuzz v0.29.1 h1:zJTlWYOrT5dsVVHW8HGcnR/vaWfxQfNh11QwTtkYpcs=
kmodules.xyz/crd-schema-fuzz v0.29.1/go.mod h1:n708z9YQqLMP2KNLQVgBcRJw1QpSWLvpNCEi+KJDOYE=
-kmodules.xyz/custom-resources v0.29.1 h1:xiNylhs3ILRbcUhxxy306AOy9GMA4Mq7xFIptZKgal4=
-kmodules.xyz/custom-resources v0.29.1/go.mod h1:829zDY1EjaxPP52h1T73LZx/vgv8Pld9/uTT/ViZTc0=
+kmodules.xyz/custom-resources v0.30.0 h1:vR3CbseHMLwR4GvtcJJuRuwIV8voKqFqNii27rMcm1o=
+kmodules.xyz/custom-resources v0.30.0/go.mod h1:ZsTuI2mLG2s3byre7bHmpxJ9w0HDqAkRTL1+izGFI24=
kmodules.xyz/monitoring-agent-api v0.29.0 h1:gpFl6OZrlMLb/ySMHdREI9EwGtnJ91oZBn9H1UFRwB4=
kmodules.xyz/monitoring-agent-api v0.29.0/go.mod h1:iNbvaMTgVFOI5q2LJtGK91j4Dmjv4ZRiRdasGmWLKQI=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
diff --git a/pkg/cmds/server/operator.go b/pkg/cmds/server/operator.go
index 08cffbae4..fa2f4383f 100644
--- a/pkg/cmds/server/operator.go
+++ b/pkg/cmds/server/operator.go
@@ -33,8 +33,8 @@ import (
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
cu "kmodules.xyz/client-go/client"
+ clustermeta "kmodules.xyz/client-go/cluster"
"kmodules.xyz/client-go/tools/clientcmd"
- "kmodules.xyz/client-go/tools/clusterid"
appcatalogapi "kmodules.xyz/custom-resources/apis/appcatalog/v1alpha1"
mona "kmodules.xyz/monitoring-agent-api/api/v1"
"sigs.k8s.io/controller-runtime/pkg/cache"
@@ -84,7 +84,7 @@ func NewOperatorOptions() *OperatorOptions {
}

func (s *OperatorOptions) AddGoFlags(fs *flag.FlagSet) {
- clusterid.AddGoFlags(fs)
+ clustermeta.AddGoFlags(fs)
fs.StringVar(&s.MasterURL, "master", s.MasterURL, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.KubeconfigPath, "kubeconfig", s.KubeconfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
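
The only functional change in this file is swapping the source of the cluster-identity flags from kmodules.xyz/client-go/tools/clusterid to kmodules.xyz/client-go/cluster. Below is a minimal, illustrative sketch (not part of the patch) of registering those flags on a standalone FlagSet; it assumes nothing beyond the clustermeta.AddGoFlags call used above plus the standard library flag package.

```go
package main

import (
	"flag"
	"fmt"

	clustermeta "kmodules.xyz/client-go/cluster"
)

func main() {
	fs := flag.NewFlagSet("operator", flag.ContinueOnError)

	// Register whatever cluster-metadata flags the package defines,
	// exactly as OperatorOptions.AddGoFlags does above.
	clustermeta.AddGoFlags(fs)

	// List the registered flags and their defaults.
	fs.VisitAll(func(f *flag.Flag) {
		fmt.Printf("--%s (default %q): %s\n", f.Name, f.DefValue, f.Usage)
	})
}
```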
diff --git a/pkg/controllers/servicemonitor/federation_controller.go b/pkg/controllers/servicemonitor/federation_controller.go
index d2b76a4fd..fff290178 100644
--- a/pkg/controllers/servicemonitor/federation_controller.go
+++ b/pkg/controllers/servicemonitor/federation_controller.go
@@ -282,9 +282,9 @@ func (r *FederationReconciler) copyServiceMonitor(prom *monitoringv1.Prometheus,
e.HonorLabels = true // keep original labels
- if len(e.MetricRelabelConfigs) == 0 || !reflect.DeepEqual(keepNSMetrics, *e.MetricRelabelConfigs[0]) {
- e.MetricRelabelConfigs = append([]*monitoringv1.RelabelConfig{
- &keepNSMetrics,
+ if len(e.MetricRelabelConfigs) == 0 || !reflect.DeepEqual(keepNSMetrics, e.MetricRelabelConfigs[0]) {
+ e.MetricRelabelConfigs = append([]monitoringv1.RelabelConfig{
+ keepNSMetrics,
}, e.MetricRelabelConfigs...)
}
obj.Spec.Endpoints[i] = e
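
This hunk tracks the prometheus-operator bump in go.mod (v0.71.2 to v0.75.1), where Endpoint.MetricRelabelConfigs changed from a slice of pointers ([]*RelabelConfig) to a slice of values ([]RelabelConfig), so the controller now compares and prepends values directly. A minimal, self-contained sketch of the same prepend-if-missing pattern follows; the keep config below is a placeholder, not the one the controller actually builds.

```go
package main

import (
	"fmt"
	"reflect"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// prependKeep puts keep at the front of configs unless it is already first,
// mirroring the value-slice handling in copyServiceMonitor above.
func prependKeep(configs []monitoringv1.RelabelConfig, keep monitoringv1.RelabelConfig) []monitoringv1.RelabelConfig {
	if len(configs) == 0 || !reflect.DeepEqual(keep, configs[0]) {
		return append([]monitoringv1.RelabelConfig{keep}, configs...)
	}
	return configs
}

func main() {
	// Placeholder relabel config for illustration only.
	keep := monitoringv1.RelabelConfig{Action: "keep", Regex: "demo.*"}

	out := prependKeep(nil, keep)
	fmt.Println(len(out)) // 1

	// Prepending again is a no-op because the config is already first.
	out = prependKeep(out, keep)
	fmt.Println(len(out)) // still 1
}
```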
diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md
new file mode 100644
index 000000000..24e2457d8
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/README.md
@@ -0,0 +1,14 @@
+# filippo.io/edwards25519
+
+```
+import "filippo.io/edwards25519"
+```
+
+This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
+Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
+
+The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
+
+Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
+
+Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
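
The vendored README above shows only the import path. The following is an illustrative usage sketch, exercising just the APIs visible in the vendored files in this diff (NewGeneratorPoint, Add, Bytes, SetBytes, Equal); it is a sketch of how the library is consumed, not part of the vendored code.

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	g := edwards25519.NewGeneratorPoint()

	// 2G = G + G, using the point arithmetic defined in edwards25519.go below.
	twoG := new(edwards25519.Point).Add(g, g)

	// Round-trip through the canonical 32-byte encoding.
	enc := twoG.Bytes()
	decoded, err := new(edwards25519.Point).SetBytes(enc)
	if err != nil {
		panic(err)
	}

	// Equal returns 1 for equivalent points.
	fmt.Println("round-trip ok:", decoded.Equal(twoG) == 1)
	fmt.Printf("2G = %x\n", enc)
}
```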
diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go
new file mode 100644
index 000000000..ab6aaebc0
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/doc.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edwards25519 implements group logic for the twisted Edwards curve
+//
+// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
+//
+// This is better known as the Edwards curve equivalent to Curve25519, and is
+// the curve used by the Ed25519 signature scheme.
+//
+// Most users don't need this package, and should instead use crypto/ed25519 for
+// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
+// github.com/gtank/ristretto255 for prime order group logic.
+//
+// However, developers who do need to interact with low-level edwards25519
+// operations can use this package, which is an extended version of
+// crypto/internal/edwards25519 from the standard library repackaged as
+// an importable module.
+package edwards25519
diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go
new file mode 100644
index 000000000..a744da2c6
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/edwards25519.go
@@ -0,0 +1,427 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// Point types.
+
+type projP1xP1 struct {
+ X, Y, Z, T field.Element
+}
+
+type projP2 struct {
+ X, Y, Z field.Element
+}
+
+// Point represents a point on the edwards25519 curve.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is NOT valid, and it may be used only as a receiver.
+type Point struct {
+ // Make the type not comparable (i.e. used with == or as a map key), as
+ // equivalent points can be represented by different Go values.
+ _ incomparable
+
+ // The point is internally represented in extended coordinates (X, Y, Z, T)
+ // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
+ x, y, z, t field.Element
+}
+
+type incomparable [0]func()
+
+func checkInitialized(points ...*Point) {
+ for _, p := range points {
+ if p.x == (field.Element{}) && p.y == (field.Element{}) {
+ panic("edwards25519: use of uninitialized Point")
+ }
+ }
+}
+
+type projCached struct {
+ YplusX, YminusX, Z, T2d field.Element
+}
+
+type affineCached struct {
+ YplusX, YminusX, T2d field.Element
+}
+
+// Constructors.
+
+func (v *projP2) Zero() *projP2 {
+ v.X.Zero()
+ v.Y.One()
+ v.Z.One()
+ return v
+}
+
+// identity is the point at infinity.
+var identity, _ = new(Point).SetBytes([]byte{
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+
+// NewIdentityPoint returns a new Point set to the identity.
+func NewIdentityPoint() *Point {
+ return new(Point).Set(identity)
+}
+
+// generator is the canonical curve basepoint. See TestGenerator for the
+// correspondence of this encoding with the values in RFC 8032.
+var generator, _ = new(Point).SetBytes([]byte{
+ 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
+
+// NewGeneratorPoint returns a new Point set to the canonical generator.
+func NewGeneratorPoint() *Point {
+ return new(Point).Set(generator)
+}
+
+func (v *projCached) Zero() *projCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.Z.One()
+ v.T2d.Zero()
+ return v
+}
+
+func (v *affineCached) Zero() *affineCached {
+ v.YplusX.One()
+ v.YminusX.One()
+ v.T2d.Zero()
+ return v
+}
+
+// Assignments.
+
+// Set sets v = u, and returns v.
+func (v *Point) Set(u *Point) *Point {
+ *v = *u
+ return v
+}
+
+// Encoding.
+
+// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
+// Section 5.1.2.
+func (v *Point) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytes(&buf)
+}
+
+func (v *Point) bytes(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ var zInv, x, y field.Element
+ zInv.Invert(&v.z) // zInv = 1 / Z
+ x.Multiply(&v.x, &zInv) // x = X / Z
+ y.Multiply(&v.y, &zInv) // y = Y / Z
+
+ out := copyFieldElement(buf, &y)
+ out[31] |= byte(x.IsNegative() << 7)
+ return out
+}
+
+var feOne = new(field.Element).One()
+
+// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
+// represent a valid point on the curve, SetBytes returns nil and an error and
+// the receiver is unchanged. Otherwise, SetBytes returns v.
+//
+// Note that SetBytes accepts all non-canonical encodings of valid points.
+// That is, it follows decoding rules that match most implementations in
+// the ecosystem rather than RFC 8032.
+func (v *Point) SetBytes(x []byte) (*Point, error) {
+ // Specifically, the non-canonical encodings that are accepted are
+ // 1) the ones where the field element is not reduced (see the
+ // (*field.Element).SetBytes docs) and
+ // 2) the ones where the x-coordinate is zero and the sign bit is set.
+ //
+ // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
+ // specifically the "Canonical A, R" section.
+
+ y, err := new(field.Element).SetBytes(x)
+ if err != nil {
+ return nil, errors.New("edwards25519: invalid point encoding length")
+ }
+
+ // -x² + y² = 1 + dx²y²
+ // x² + dx²y² = x²(dy² + 1) = y² - 1
+ // x² = (y² - 1) / (dy² + 1)
+
+ // u = y² - 1
+ y2 := new(field.Element).Square(y)
+ u := new(field.Element).Subtract(y2, feOne)
+
+ // v = dy² + 1
+ vv := new(field.Element).Multiply(y2, d)
+ vv = vv.Add(vv, feOne)
+
+ // x = +√(u/v)
+ xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
+ if wasSquare == 0 {
+ return nil, errors.New("edwards25519: invalid point encoding")
+ }
+
+ // Select the negative square root if the sign bit is set.
+ xxNeg := new(field.Element).Negate(xx)
+ xx = xx.Select(xxNeg, xx, int(x[31]>>7))
+
+ v.x.Set(xx)
+ v.y.Set(y)
+ v.z.One()
+ v.t.Multiply(xx, y) // xy = T / Z
+
+ return v, nil
+}
+
+func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
+ copy(buf[:], v.Bytes())
+ return buf[:]
+}
+
+// Conversions.
+
+func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
+ v.X.Multiply(&p.X, &p.T)
+ v.Y.Multiply(&p.Y, &p.Z)
+ v.Z.Multiply(&p.Z, &p.T)
+ return v
+}
+
+func (v *projP2) FromP3(p *Point) *projP2 {
+ v.X.Set(&p.x)
+ v.Y.Set(&p.y)
+ v.Z.Set(&p.z)
+ return v
+}
+
+func (v *Point) fromP1xP1(p *projP1xP1) *Point {
+ v.x.Multiply(&p.X, &p.T)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Multiply(&p.Z, &p.T)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+func (v *Point) fromP2(p *projP2) *Point {
+ v.x.Multiply(&p.X, &p.Z)
+ v.y.Multiply(&p.Y, &p.Z)
+ v.z.Square(&p.Z)
+ v.t.Multiply(&p.X, &p.Y)
+ return v
+}
+
+// d is a constant in the curve equation.
+var d, _ = new(field.Element).SetBytes([]byte{
+ 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
+ 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
+ 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
+ 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
+var d2 = new(field.Element).Add(d, d)
+
+func (v *projCached) FromP3(p *Point) *projCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.Z.Set(&p.z)
+ v.T2d.Multiply(&p.t, d2)
+ return v
+}
+
+func (v *affineCached) FromP3(p *Point) *affineCached {
+ v.YplusX.Add(&p.y, &p.x)
+ v.YminusX.Subtract(&p.y, &p.x)
+ v.T2d.Multiply(&p.t, d2)
+
+ var invZ field.Element
+ invZ.Invert(&p.z)
+ v.YplusX.Multiply(&v.YplusX, &invZ)
+ v.YminusX.Multiply(&v.YminusX, &invZ)
+ v.T2d.Multiply(&v.T2d, &invZ)
+ return v
+}
+
+// (Re)addition and subtraction.
+
+// Add sets v = p + q, and returns v.
+func (v *Point) Add(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Add(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+// Subtract sets v = p - q, and returns v.
+func (v *Point) Subtract(p, q *Point) *Point {
+ checkInitialized(p, q)
+ qCached := new(projCached).FromP3(q)
+ result := new(projP1xP1).Sub(p, qCached)
+ return v.fromP1xP1(result)
+}
+
+func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&ZZ2, &TT2d)
+ v.T.Subtract(&ZZ2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+ ZZ2.Multiply(&p.z, &q.Z)
+
+ ZZ2.Add(&ZZ2, &ZZ2)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
+ v.T.Add(&ZZ2, &TT2d) // flipped sign
+ return v
+}
+
+func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YplusX)
+ MM.Multiply(&YminusX, &q.YminusX)
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Add(&Z2, &TT2d)
+ v.T.Subtract(&Z2, &TT2d)
+ return v
+}
+
+func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
+ var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
+
+ YplusX.Add(&p.y, &p.x)
+ YminusX.Subtract(&p.y, &p.x)
+
+ PP.Multiply(&YplusX, &q.YminusX) // flipped sign
+ MM.Multiply(&YminusX, &q.YplusX) // flipped sign
+ TT2d.Multiply(&p.t, &q.T2d)
+
+ Z2.Add(&p.z, &p.z)
+
+ v.X.Subtract(&PP, &MM)
+ v.Y.Add(&PP, &MM)
+ v.Z.Subtract(&Z2, &TT2d) // flipped sign
+ v.T.Add(&Z2, &TT2d) // flipped sign
+ return v
+}
+
+// Doubling.
+
+func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
+ var XX, YY, ZZ2, XplusYsq field.Element
+
+ XX.Square(&p.X)
+ YY.Square(&p.Y)
+ ZZ2.Square(&p.Z)
+ ZZ2.Add(&ZZ2, &ZZ2)
+ XplusYsq.Add(&p.X, &p.Y)
+ XplusYsq.Square(&XplusYsq)
+
+ v.Y.Add(&YY, &XX)
+ v.Z.Subtract(&YY, &XX)
+
+ v.X.Subtract(&XplusYsq, &v.Y)
+ v.T.Subtract(&ZZ2, &v.Z)
+ return v
+}
+
+// Negation.
+
+// Negate sets v = -p, and returns v.
+func (v *Point) Negate(p *Point) *Point {
+ checkInitialized(p)
+ v.x.Negate(&p.x)
+ v.y.Set(&p.y)
+ v.z.Set(&p.z)
+ v.t.Negate(&p.t)
+ return v
+}
+
+// Equal returns 1 if v is equivalent to u, and 0 otherwise.
+func (v *Point) Equal(u *Point) int {
+ checkInitialized(v, u)
+
+ var t1, t2, t3, t4 field.Element
+ t1.Multiply(&v.x, &u.z)
+ t2.Multiply(&u.x, &v.z)
+ t3.Multiply(&v.y, &u.z)
+ t4.Multiply(&u.y, &v.z)
+
+ return t1.Equal(&t2) & t3.Equal(&t4)
+}
+
+// Constant-time operations
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *projCached) Select(a, b *projCached, cond int) *projCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.Z.Select(&a.Z, &b.Z, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// Select sets v to a if cond == 1 and to b if cond == 0.
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
+ v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
+ v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
+ v.T2d.Select(&a.T2d, &b.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *projCached) CondNeg(cond int) *projCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
+
+// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
+func (v *affineCached) CondNeg(cond int) *affineCached {
+ v.YplusX.Swap(&v.YminusX, cond)
+ v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go
new file mode 100644
index 000000000..d152d68ff
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/extra.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/internal/edwards25519 package.
+
+import (
+ "errors"
+
+ "filippo.io/edwards25519/field"
+)
+
+// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap. Don't change the style without making
+ // sure it doesn't increase the inliner cost.
+ var e [4]field.Element
+ X, Y, Z, T = v.extendedCoordinates(&e)
+ return
+}
+
+func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
+ checkInitialized(v)
+ X = e[0].Set(&v.x)
+ Y = e[1].Set(&v.y)
+ Z = e[2].Set(&v.z)
+ T = e[3].Set(&v.t)
+ return
+}
+
+// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
+// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
+//
+// If the coordinates are invalid or don't represent a valid point on the curve,
+// SetExtendedCoordinates returns nil and an error and the receiver is
+// unchanged. Otherwise, SetExtendedCoordinates returns v.
+func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
+ if !isOnCurve(X, Y, Z, T) {
+ return nil, errors.New("edwards25519: invalid point coordinates")
+ }
+ v.x.Set(X)
+ v.y.Set(Y)
+ v.z.Set(Z)
+ v.t.Set(T)
+ return v, nil
+}
+
+func isOnCurve(X, Y, Z, T *field.Element) bool {
+ var lhs, rhs field.Element
+ XX := new(field.Element).Square(X)
+ YY := new(field.Element).Square(Y)
+ ZZ := new(field.Element).Square(Z)
+ TT := new(field.Element).Square(T)
+ // -x² + y² = 1 + dx²y²
+ // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
+ // -X² + Y² = Z² + dT²
+ lhs.Subtract(YY, XX)
+ rhs.Multiply(d, TT).Add(&rhs, ZZ)
+ if lhs.Equal(&rhs) != 1 {
+ return false
+ }
+ // xy = T/Z
+ // XY/Z² = T/Z
+ // XY = TZ
+ lhs.Multiply(X, Y)
+ rhs.Multiply(T, Z)
+ return lhs.Equal(&rhs) == 1
+}
+
+// BytesMontgomery converts v to a point on the birationally-equivalent
+// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
+// according to RFC 7748.
+//
+// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
+// to the same value. If v is the identity point, BytesMontgomery returns 32
+// zero bytes, analogously to the X25519 function.
+//
+// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
+// while every valid edwards25519 point has a unique u-coordinate Montgomery
+// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
+// to any edwards25519 point, and every other X25519 input corresponds to two
+// edwards25519 points.
+func (v *Point) BytesMontgomery() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var buf [32]byte
+ return v.bytesMontgomery(&buf)
+}
+
+func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
+ checkInitialized(v)
+
+ // RFC 7748, Section 4.1 provides the bilinear map to calculate the
+ // Montgomery u-coordinate
+ //
+ // u = (1 + y) / (1 - y)
+ //
+ // where y = Y / Z.
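+ //
+ // When v is the identity point, y = 1, so 1 - y = 0, Invert returns
+ // zero, and u = 0, giving the all-zero encoding documented above.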
+
+ var y, recip, u field.Element
+
+ y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
+ recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
+ u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
+
+ return copyFieldElement(buf, &u)
+}
+
+// MultByCofactor sets v = 8 * p, and returns v.
+func (v *Point) MultByCofactor(p *Point) *Point {
+ checkInitialized(p)
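+ // Three successive doublings compute 2*2*2*p = 8*p.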
+ result := projP1xP1{}
+ pp := (&projP2{}).FromP3(p)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ pp.FromP1xP1(&result)
+ result.Double(pp)
+ return v.fromP1xP1(&result)
+}
+
+// Given k > 0, set s = s**(2**k).
+func (s *Scalar) pow2k(k int) {
+ for i := 0; i < k; i++ {
+ s.Multiply(s, s)
+ }
+}
+
+// Invert sets s to the inverse of a nonzero scalar t, and returns s.
+//
+// If t is zero, Invert returns zero.
+func (s *Scalar) Invert(t *Scalar) *Scalar {
+ // Uses a hardcoded sliding window of width 4.
+ var table [8]Scalar
+ var tt Scalar
+ tt.Multiply(t, t)
+ table[0] = *t
+ for i := 0; i < 7; i++ {
+ table[i+1].Multiply(&table[i], &tt)
+ }
+ // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
+ // so t**k = t[k/2] for odd k
+
+ // To compute the sliding window digits, use the following Sage script:
+
+ // sage: import itertools
+ // sage: def sliding_window(w,k):
+ // ....: digits = []
+ // ....: while k > 0:
+ // ....: if k % 2 == 1:
+ // ....: kmod = k % (2**w)
+ // ....: digits.append(kmod)
+ // ....: k = k - kmod
+ // ....: else:
+ // ....: digits.append(0)
+ // ....: k = k // 2
+ // ....: return digits
+
+ // Now we can compute s roughly as follows:
+
+ // sage: s = 1
+ // sage: for coeff in reversed(sliding_window(4,l-2)):
+ // ....: s = s*s
+ // ....: if coeff > 0 :
+ // ....: s = s*t**coeff
+
+ // This works on one bit at a time, with many runs of zeros.
+ // The digits can be collapsed into [(count, coeff)] as follows:
+
+ // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
+
+ // Entries of the form (k, 0) turn into pow2k(k)
+ // Entries of the form (1, coeff) turn into a squaring and then a table lookup.
+ // We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
+
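+ // For example, table[15/2] below is table[7] = t**15 (the indices use
+ // integer division), and each pow2k(k+1) performs the k doublings of a
+ // run of zero digits plus the squaring that precedes the next lookup.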
+ *s = table[1/2]
+ s.pow2k(127 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[5/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[1/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(5 + 1)
+ s.Multiply(s, &table[11/2])
+ s.pow2k(9 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[3/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[13/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[7/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[9/2])
+ s.pow2k(3 + 1)
+ s.Multiply(s, &table[15/2])
+ s.pow2k(4 + 1)
+ s.Multiply(s, &table[11/2])
+
+ return s
+}
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called MultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Proceed as in the single-base case, but share doublings
+ // between each point in the multiscalar equation.
+
+ // Build lookup tables for each point
+ tables := make([]projLookupTable, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute signed radix-16 digits for each scalar
+ digits := make([][64]int8, len(scalars))
+ for i := range digits {
+ digits[i] = scalars[i].signedRadix16()
+ }
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][63])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ for i := 62; i >= 0; i-- {
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ // Lookup-and-add the appropriate multiple of each input point
+ for j := range tables {
+ tables[j].SelectInto(multiple, digits[j][i])
+ tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+ v.fromP1xP1(tmp1) // update v
+ }
+ tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+ }
+ return v
+}
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+ if len(scalars) != len(points) {
+ panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
+ }
+ checkInitialized(points...)
+
+ // Generalize double-base NAF computation to arbitrary sizes.
+ // Here all the points are dynamic, so we only use the smaller
+ // tables.
+
+ // Build lookup tables for each point
+ tables := make([]nafLookupTable5, len(points))
+ for i := range tables {
+ tables[i].FromP3(points[i])
+ }
+ // Compute a NAF for each scalar
+ nafs := make([][256]int8, len(scalars))
+ for i := range nafs {
+ nafs[i] = scalars[i].nonAdjacentForm(5)
+ }
+
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ //
+ // Skip trying to find the first nonzero coefficient, because
+ // searching might be more work than a few extra doublings.
+ for i := 255; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ for j := range nafs {
+ if nafs[j][i] > 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, nafs[j][i])
+ tmp1.Add(v, multiple)
+ } else if nafs[j][i] < 0 {
+ v.fromP1xP1(tmp1)
+ tables[j].SelectInto(multiple, -nafs[j][i])
+ tmp1.Sub(v, multiple)
+ }
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
new file mode 100644
index 000000000..5518ef2b9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe.go
@@ -0,0 +1,420 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package field implements fast arithmetic modulo 2^255-19.
+package field
+
+import (
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+// Element represents an element of the field GF(2^255-19). Note that this
+// is not a cryptographically secure group, and should only be used to interact
+// with edwards25519.Point coordinates.
+//
+// This type works similarly to math/big.Int, and all arguments and receivers
+// are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Element struct {
+ // An element t represents the integer
+ // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
+ //
+ // Between operations, all limbs are expected to be lower than 2^52.
+ l0 uint64
+ l1 uint64
+ l2 uint64
+ l3 uint64
+ l4 uint64
+}
+
+const maskLow51Bits uint64 = (1 << 51) - 1
+
+var feZero = &Element{0, 0, 0, 0, 0}
+
+// Zero sets v = 0, and returns v.
+func (v *Element) Zero() *Element {
+ *v = *feZero
+ return v
+}
+
+var feOne = &Element{1, 0, 0, 0, 0}
+
+// One sets v = 1, and returns v.
+func (v *Element) One() *Element {
+ *v = *feOne
+ return v
+}
+
+// reduce reduces v modulo 2^255 - 19 and returns it.
+func (v *Element) reduce() *Element {
+ v.carryPropagate()
+
+ // After the light reduction we now have a field element representation
+ // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
+
+ // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
+ // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
+ c := (v.l0 + 19) >> 51
+ c = (v.l1 + c) >> 51
+ c = (v.l2 + c) >> 51
+ c = (v.l3 + c) >> 51
+ c = (v.l4 + c) >> 51
+
+ // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
+ // effectively applying the reduction identity to the carry.
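+ //
+ // Concretely, when c = 1 adding 19 and then dropping the 2^255 bit (the
+ // final masking of l4 below) computes v + 19 - 2^255 = v - (2^255 - 19).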
+ v.l0 += 19 * c
+
+ v.l1 += v.l0 >> 51
+ v.l0 = v.l0 & maskLow51Bits
+ v.l2 += v.l1 >> 51
+ v.l1 = v.l1 & maskLow51Bits
+ v.l3 += v.l2 >> 51
+ v.l2 = v.l2 & maskLow51Bits
+ v.l4 += v.l3 >> 51
+ v.l3 = v.l3 & maskLow51Bits
+ // no additional carry
+ v.l4 = v.l4 & maskLow51Bits
+
+ return v
+}
+
+// Add sets v = a + b, and returns v.
+func (v *Element) Add(a, b *Element) *Element {
+ v.l0 = a.l0 + b.l0
+ v.l1 = a.l1 + b.l1
+ v.l2 = a.l2 + b.l2
+ v.l3 = a.l3 + b.l3
+ v.l4 = a.l4 + b.l4
+ // Using the generic implementation here is actually faster than the
+ // assembly. Probably because the body of this function is so simple that
+ // the compiler can figure out better optimizations by inlining the carry
+ // propagation.
+ return v.carryPropagateGeneric()
+}
+
+// Subtract sets v = a - b, and returns v.
+func (v *Element) Subtract(a, b *Element) *Element {
+ // We first add 2 * p, to guarantee the subtraction won't underflow, and
+ // then subtract b (which can be up to 2^255 + 2^13 * 19).
+ v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
+ v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
+ v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
+ v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
+ v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
+ return v.carryPropagate()
+}
+
+// Negate sets v = -a, and returns v.
+func (v *Element) Negate(a *Element) *Element {
+ return v.Subtract(feZero, a)
+}
+
+// Invert sets v = 1/z mod p, and returns v.
+//
+// If z == 0, Invert returns v = 0.
+func (v *Element) Invert(z *Element) *Element {
+ // Inversion is implemented as exponentiation with exponent p − 2. It uses the
+ // same sequence of 255 squarings and 11 multiplications as [Curve25519].
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
+
+ z2.Square(z) // 2
+ t.Square(&z2) // 4
+ t.Square(&t) // 8
+ z9.Multiply(&t, z) // 9
+ z11.Multiply(&z9, &z2) // 11
+ t.Square(&z11) // 22
+ z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
+
+ t.Square(&z2_5_0) // 2^6 - 2^1
+ for i := 0; i < 4; i++ {
+ t.Square(&t) // 2^10 - 2^5
+ }
+ z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
+
+ t.Square(&z2_10_0) // 2^11 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^20 - 2^10
+ }
+ z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
+
+ t.Square(&z2_20_0) // 2^21 - 2^1
+ for i := 0; i < 19; i++ {
+ t.Square(&t) // 2^40 - 2^20
+ }
+ t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
+
+ t.Square(&t) // 2^41 - 2^1
+ for i := 0; i < 9; i++ {
+ t.Square(&t) // 2^50 - 2^10
+ }
+ z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
+
+ t.Square(&z2_50_0) // 2^51 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^100 - 2^50
+ }
+ z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
+
+ t.Square(&z2_100_0) // 2^101 - 2^1
+ for i := 0; i < 99; i++ {
+ t.Square(&t) // 2^200 - 2^100
+ }
+ t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
+
+ t.Square(&t) // 2^201 - 2^1
+ for i := 0; i < 49; i++ {
+ t.Square(&t) // 2^250 - 2^50
+ }
+ t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
+
+ t.Square(&t) // 2^251 - 2^1
+ t.Square(&t) // 2^252 - 2^2
+ t.Square(&t) // 2^253 - 2^3
+ t.Square(&t) // 2^254 - 2^4
+ t.Square(&t) // 2^255 - 2^5
+
+ return v.Multiply(&t, &z11) // 2^255 - 21
+}
+
+// Set sets v = a, and returns v.
+func (v *Element) Set(a *Element) *Element {
+ *v = *a
+ return v
+}
+
+// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
+// not of the right length, SetBytes returns nil and an error, and the
+// receiver is unchanged.
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+ x0lo, x0hi := mul51(x.l0, y)
+ x1lo, x1hi := mul51(x.l1, y)
+ x2lo, x2hi := mul51(x.l2, y)
+ x3lo, x3hi := mul51(x.l3, y)
+ x4lo, x4hi := mul51(x.l4, y)
+ v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+ v.l1 = x1lo + x0hi
+ v.l2 = x2lo + x1hi
+ v.l3 = x3lo + x2hi
+ v.l4 = x4lo + x3hi
+ // The hi portions are going to be only 32 bits, plus any previous excess,
+ // so we can skip the carry propagation.
+ return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+ mh, ml := bits.Mul64(a, uint64(b))
+ lo = ml & maskLow51Bits
+ hi = (mh << 13) | (ml >> 51)
+ return
+}
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+ var t0, t1, t2 Element
+
+ t0.Square(x) // x^2
+ t1.Square(&t0) // x^4
+ t1.Square(&t1) // x^8
+ t1.Multiply(x, &t1) // x^9
+ t0.Multiply(&t0, &t1) // x^11
+ t0.Square(&t0) // x^22
+ t0.Multiply(&t1, &t0) // x^31
+ t1.Square(&t0) // x^62
+ for i := 1; i < 5; i++ { // x^992
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
+ t1.Square(&t0) // 2^11 - 2
+ for i := 1; i < 10; i++ { // 2^20 - 2^10
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^20 - 1
+ t2.Square(&t1) // 2^21 - 2
+ for i := 1; i < 20; i++ { // 2^40 - 2^20
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^40 - 1
+ t1.Square(&t1) // 2^41 - 2
+ for i := 1; i < 10; i++ { // 2^50 - 2^10
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^50 - 1
+ t1.Square(&t0) // 2^51 - 2
+ for i := 1; i < 50; i++ { // 2^100 - 2^50
+ t1.Square(&t1)
+ }
+ t1.Multiply(&t1, &t0) // 2^100 - 1
+ t2.Square(&t1) // 2^101 - 2
+ for i := 1; i < 100; i++ { // 2^200 - 2^100
+ t2.Square(&t2)
+ }
+ t1.Multiply(&t2, &t1) // 2^200 - 1
+ t1.Square(&t1) // 2^201 - 2
+ for i := 1; i < 50; i++ { // 2^250 - 2^50
+ t1.Square(&t1)
+ }
+ t0.Multiply(&t1, &t0) // 2^250 - 1
+ t0.Square(&t0) // 2^251 - 2
+ t0.Square(&t0) // 2^252 - 4
+ return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+ 2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+ t0 := new(Element)
+
+ // r = (u * v3) * (u * v7)^((p-5)/8)
+ v2 := new(Element).Square(v)
+ uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+ uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+ rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+ check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+ uNeg := new(Element).Negate(u)
+ correctSignSqrt := check.Equal(u)
+ flippedSignSqrt := check.Equal(uNeg)
+ flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+ rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+ // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+ rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+ r.Absolute(rr) // Choose the nonnegative square root.
+ return r, correctSignSqrt | flippedSignSqrt
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
new file mode 100644
index 000000000..edcf163c4
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go
@@ -0,0 +1,16 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+package field
+
+// feMul sets out = a * b. It works like feMulGeneric.
+//
+//go:noescape
+func feMul(out *Element, a *Element, b *Element)
+
+// feSquare sets out = a * a. It works like feSquareGeneric.
+//
+//go:noescape
+func feSquare(out *Element, a *Element)
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
new file mode 100644
index 000000000..293f013c9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s
@@ -0,0 +1,379 @@
+// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego
+// +build amd64,gc,!purego
+
+#include "textflag.h"
+
+// func feMul(out *Element, a *Element, b *Element)
+TEXT ·feMul(SB), NOSPLIT, $0-24
+ MOVQ a+8(FP), CX
+ MOVQ b+16(FP), BX
+
+ // r0 = a0×b0
+ MOVQ (CX), AX
+ MULQ (BX)
+ MOVQ AX, DI
+ MOVQ DX, SI
+
+ // r0 += 19×a1×b4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a2×b3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a3×b2
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r0 += 19×a4×b1
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 8(BX)
+ ADDQ AX, DI
+ ADCQ DX, SI
+
+ // r1 = a0×b1
+ MOVQ (CX), AX
+ MULQ 8(BX)
+ MOVQ AX, R9
+ MOVQ DX, R8
+
+ // r1 += a1×b0
+ MOVQ 8(CX), AX
+ MULQ (BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a2×b4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a3×b3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r1 += 19×a4×b2
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 16(BX)
+ ADDQ AX, R9
+ ADCQ DX, R8
+
+ // r2 = a0×b2
+ MOVQ (CX), AX
+ MULQ 16(BX)
+ MOVQ AX, R11
+ MOVQ DX, R10
+
+ // r2 += a1×b1
+ MOVQ 8(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += a2×b0
+ MOVQ 16(CX), AX
+ MULQ (BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a3×b4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r2 += 19×a4×b3
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(BX)
+ ADDQ AX, R11
+ ADCQ DX, R10
+
+ // r3 = a0×b3
+ MOVQ (CX), AX
+ MULQ 24(BX)
+ MOVQ AX, R13
+ MOVQ DX, R12
+
+ // r3 += a1×b2
+ MOVQ 8(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a2×b1
+ MOVQ 16(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += a3×b0
+ MOVQ 24(CX), AX
+ MULQ (BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r3 += 19×a4×b4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(BX)
+ ADDQ AX, R13
+ ADCQ DX, R12
+
+ // r4 = a0×b4
+ MOVQ (CX), AX
+ MULQ 32(BX)
+ MOVQ AX, R15
+ MOVQ DX, R14
+
+ // r4 += a1×b3
+ MOVQ 8(CX), AX
+ MULQ 24(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a2×b2
+ MOVQ 16(CX), AX
+ MULQ 16(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a3×b1
+ MOVQ 24(CX), AX
+ MULQ 8(BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // r4 += a4×b0
+ MOVQ 32(CX), AX
+ MULQ (BX)
+ ADDQ AX, R15
+ ADCQ DX, R14
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, DI, SI
+ SHLQ $0x0d, R9, R8
+ SHLQ $0x0d, R11, R10
+ SHLQ $0x0d, R13, R12
+ SHLQ $0x0d, R15, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Second reduction chain (carryPropagate)
+ MOVQ DI, SI
+ SHRQ $0x33, SI
+ MOVQ R9, R8
+ SHRQ $0x33, R8
+ MOVQ R11, R10
+ SHRQ $0x33, R10
+ MOVQ R13, R12
+ SHRQ $0x33, R12
+ MOVQ R15, R14
+ SHRQ $0x33, R14
+ ANDQ AX, DI
+ IMUL3Q $0x13, R14, R14
+ ADDQ R14, DI
+ ANDQ AX, R9
+ ADDQ SI, R9
+ ANDQ AX, R11
+ ADDQ R8, R11
+ ANDQ AX, R13
+ ADDQ R10, R13
+ ANDQ AX, R15
+ ADDQ R12, R15
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ DI, (AX)
+ MOVQ R9, 8(AX)
+ MOVQ R11, 16(AX)
+ MOVQ R13, 24(AX)
+ MOVQ R15, 32(AX)
+ RET
+
+// func feSquare(out *Element, a *Element)
+TEXT ·feSquare(SB), NOSPLIT, $0-16
+ MOVQ a+8(FP), CX
+
+ // r0 = l0×l0
+ MOVQ (CX), AX
+ MULQ (CX)
+ MOVQ AX, SI
+ MOVQ DX, BX
+
+ // r0 += 38×l1×l4
+ MOVQ 8(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r0 += 38×l2×l3
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, SI
+ ADCQ DX, BX
+
+ // r1 = 2×l0×l1
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 8(CX)
+ MOVQ AX, R8
+ MOVQ DX, DI
+
+ // r1 += 38×l2×l4
+ MOVQ 16(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r1 += 19×l3×l3
+ MOVQ 24(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R8
+ ADCQ DX, DI
+
+ // r2 = 2×l0×l2
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 16(CX)
+ MOVQ AX, R10
+ MOVQ DX, R9
+
+ // r2 += l1×l1
+ MOVQ 8(CX), AX
+ MULQ 8(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r2 += 38×l3×l4
+ MOVQ 24(CX), AX
+ IMUL3Q $0x26, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R10
+ ADCQ DX, R9
+
+ // r3 = 2×l0×l3
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 24(CX)
+ MOVQ AX, R12
+ MOVQ DX, R11
+
+ // r3 += 2×l1×l2
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 16(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r3 += 19×l4×l4
+ MOVQ 32(CX), AX
+ IMUL3Q $0x13, AX, AX
+ MULQ 32(CX)
+ ADDQ AX, R12
+ ADCQ DX, R11
+
+ // r4 = 2×l0×l4
+ MOVQ (CX), AX
+ SHLQ $0x01, AX
+ MULQ 32(CX)
+ MOVQ AX, R14
+ MOVQ DX, R13
+
+ // r4 += 2×l1×l3
+ MOVQ 8(CX), AX
+ IMUL3Q $0x02, AX, AX
+ MULQ 24(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // r4 += l2×l2
+ MOVQ 16(CX), AX
+ MULQ 16(CX)
+ ADDQ AX, R14
+ ADCQ DX, R13
+
+ // First reduction chain
+ MOVQ $0x0007ffffffffffff, AX
+ SHLQ $0x0d, SI, BX
+ SHLQ $0x0d, R8, DI
+ SHLQ $0x0d, R10, R9
+ SHLQ $0x0d, R12, R11
+ SHLQ $0x0d, R14, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Second reduction chain (carryPropagate)
+ MOVQ SI, BX
+ SHRQ $0x33, BX
+ MOVQ R8, DI
+ SHRQ $0x33, DI
+ MOVQ R10, R9
+ SHRQ $0x33, R9
+ MOVQ R12, R11
+ SHRQ $0x33, R11
+ MOVQ R14, R13
+ SHRQ $0x33, R13
+ ANDQ AX, SI
+ IMUL3Q $0x13, R13, R13
+ ADDQ R13, SI
+ ANDQ AX, R8
+ ADDQ BX, R8
+ ANDQ AX, R10
+ ADDQ DI, R10
+ ANDQ AX, R12
+ ADDQ R9, R12
+ ANDQ AX, R14
+ ADDQ R11, R14
+
+ // Store output
+ MOVQ out+0(FP), AX
+ MOVQ SI, (AX)
+ MOVQ R8, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R12, 24(AX)
+ MOVQ R14, 32(AX)
+ RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
new file mode 100644
index 000000000..ddb6c9b8f
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || !gc || purego
+// +build !amd64 !gc purego
+
+package field
+
+func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
+
+func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
new file mode 100644
index 000000000..af459ef51
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+// +build arm64,gc,!purego
+
+package field
+
+//go:noescape
+func carryPropagate(v *Element)
+
+func (v *Element) carryPropagate() *Element {
+ carryPropagate(v)
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
new file mode 100644
index 000000000..3126a4341
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && gc && !purego
+
+#include "textflag.h"
+
+// carryPropagate works exactly like carryPropagateGeneric and uses the
+// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
+// avoids loading R0-R4 twice and uses LDP and STP.
+//
+// See https://golang.org/issues/43145 for the main compiler issue.
+//
+// func carryPropagate(v *Element)
+TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
+ MOVD v+0(FP), R20
+
+ LDP 0(R20), (R0, R1)
+ LDP 16(R20), (R2, R3)
+ MOVD 32(R20), R4
+
+ AND $0x7ffffffffffff, R0, R10
+ AND $0x7ffffffffffff, R1, R11
+ AND $0x7ffffffffffff, R2, R12
+ AND $0x7ffffffffffff, R3, R13
+ AND $0x7ffffffffffff, R4, R14
+
+ ADD R0>>51, R11, R11
+ ADD R1>>51, R12, R12
+ ADD R2>>51, R13, R13
+ ADD R3>>51, R14, R14
+ // R4>>51 * 19 + R10 -> R10
+ LSR $51, R4, R21
+ MOVD $19, R22
+ MADD R22, R10, R21, R10
+
+ STP (R10, R11), 0(R20)
+ STP (R12, R13), 16(R20)
+ MOVD R14, 32(R20)
+
+ RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
new file mode 100644
index 000000000..234a5b2e5
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !arm64 || !gc || purego
+// +build !arm64 !gc purego
+
+package field
+
+func (v *Element) carryPropagate() *Element {
+ return v.carryPropagateGeneric()
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
new file mode 100644
index 000000000..1ef503b9a
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_extra.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "errors"
+
+// This file contains additional functionality that is not included in the
+// upstream crypto/ed25519/edwards25519/field package.
+
+// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
+// is reduced modulo the field order. If x is not of the right length,
+// SetWideBytes returns nil and an error, and the receiver is unchanged.
+//
+// SetWideBytes is not necessary to select a uniformly distributed value, and is
+// only provided for compatibility: SetBytes can be used instead as the chance
+// of bias is less than 2⁻²⁵⁰.
+func (v *Element) SetWideBytes(x []byte) (*Element, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetWideBytes input size")
+ }
+
+ // Split the 64 bytes into two elements, and extract the most significant
+ // bit of each, which is ignored by SetBytes.
+ lo, _ := new(Element).SetBytes(x[:32])
+ loMSB := uint64(x[31] >> 7)
+ hi, _ := new(Element).SetBytes(x[32:])
+ hiMSB := uint64(x[63] >> 7)
+
+ // The output we want is
+ //
+ // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
+ //
+ // which applying the reduction identity comes out to
+ //
+ // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
+ //
+ // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
+ // (loMSB * 19), a 57 bits value (hi.l0 * 2 * 19), and a 10 bits value
+ // (hiMSB * 2 * 19²), so it fits in a uint64.
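+ //
+ // The powers reduce because 2²⁵⁵ ≡ 19, 2²⁵⁶ ≡ 2 * 19, and
+ // 2⁵¹¹ = 2²⁵⁵ * 2²⁵⁶ ≡ 19 * 2 * 19 = 2 * 19² (mod p).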
+
+ v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
+ v.l1 = lo.l1 + hi.l1*2*19
+ v.l2 = lo.l2 + hi.l2*2*19
+ v.l3 = lo.l3 + hi.l3*2*19
+ v.l4 = lo.l4 + hi.l4*2*19
+
+ return v.carryPropagate(), nil
+}
diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
new file mode 100644
index 000000000..86f5fd955
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/field/fe_generic.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package field
+
+import "math/bits"
+
+// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
+// bits.Mul64 and bits.Add64 intrinsics.
+type uint128 struct {
+ lo, hi uint64
+}
+
+// mul64 returns a * b.
+func mul64(a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ return uint128{lo, hi}
+}
+
+// addMul64 returns v + a * b.
+func addMul64(v uint128, a, b uint64) uint128 {
+ hi, lo := bits.Mul64(a, b)
+ lo, c := bits.Add64(lo, v.lo, 0)
+ hi, _ = bits.Add64(hi, v.hi, c)
+ return uint128{lo, hi}
+}
+
+// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
+func shiftRightBy51(a uint128) uint64 {
+ return (a.hi << (64 - 51)) | (a.lo >> 51)
+}
+
+func feMulGeneric(v, a, b *Element) {
+ a0 := a.l0
+ a1 := a.l1
+ a2 := a.l2
+ a3 := a.l3
+ a4 := a.l4
+
+ b0 := b.l0
+ b1 := b.l1
+ b2 := b.l2
+ b3 := b.l3
+ b4 := b.l4
+
+ // Limb multiplication works like pen-and-paper columnar multiplication, but
+ // with 51-bit limbs instead of digits.
+ //
+ // a4 a3 a2 a1 a0 x
+ // b4 b3 b2 b1 b0 =
+ // ------------------------
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a4b1 a3b1 a2b1 a1b1 a0b1 +
+ // a4b2 a3b2 a2b2 a1b2 a0b2 +
+ // a4b3 a3b3 a2b3 a1b3 a0b3 +
+ // a4b4 a3b4 a2b4 a1b4 a0b4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
+ // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
+ // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
+ //
+ // Reduction can be carried out simultaneously to multiplication. For
+ // example, we do not compute r5: whenever the result of a multiplication
+ // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
+ //
+ // a4b0 a3b0 a2b0 a1b0 a0b0 +
+ // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
+ // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
+ // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
+ // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // Finally we add up the columns into wide, overlapping limbs.
+
+ a1_19 := a1 * 19
+ a2_19 := a2 * 19
+ a3_19 := a3 * 19
+ a4_19 := a4 * 19
+
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ r0 := mul64(a0, b0)
+ r0 = addMul64(r0, a1_19, b4)
+ r0 = addMul64(r0, a2_19, b3)
+ r0 = addMul64(r0, a3_19, b2)
+ r0 = addMul64(r0, a4_19, b1)
+
+ // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
+ r1 := mul64(a0, b1)
+ r1 = addMul64(r1, a1, b0)
+ r1 = addMul64(r1, a2_19, b4)
+ r1 = addMul64(r1, a3_19, b3)
+ r1 = addMul64(r1, a4_19, b2)
+
+ // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
+ r2 := mul64(a0, b2)
+ r2 = addMul64(r2, a1, b1)
+ r2 = addMul64(r2, a2, b0)
+ r2 = addMul64(r2, a3_19, b4)
+ r2 = addMul64(r2, a4_19, b3)
+
+ // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
+ r3 := mul64(a0, b3)
+ r3 = addMul64(r3, a1, b2)
+ r3 = addMul64(r3, a2, b1)
+ r3 = addMul64(r3, a3, b0)
+ r3 = addMul64(r3, a4_19, b4)
+
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ r4 := mul64(a0, b4)
+ r4 = addMul64(r4, a1, b3)
+ r4 = addMul64(r4, a2, b2)
+ r4 = addMul64(r4, a3, b1)
+ r4 = addMul64(r4, a4, b0)
+
+ // After the multiplication, we need to reduce (carry) the five coefficients
+ // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
+ // to respect the Element invariant.
+ //
+ // Overall, the reduction works the same as carryPropagate, except with
+ // wider inputs: we take the carry for each coefficient by shifting it right
+ // by 51, and add it to the limb above it. The top carry is multiplied by 19
+ // according to the reduction identity and added to the lowest limb.
+ //
+ // The largest coefficient (r0) will be at most 111 bits, which guarantees
+ // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
+ //
+ // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
+ // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
+ // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
+ // r0 < 2⁷ × 2⁵² × 2⁵²
+ // r0 < 2¹¹¹
+ //
+ // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
+ // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
+ // allows us to easily apply the reduction identity.
+ //
+ // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
+ // r4 < 5 × 2⁵² × 2⁵²
+ // r4 < 2¹⁰⁷
+ //
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ // Now all coefficients fit into 64-bit registers but are still too large to
+ // be passed around as an Element. We therefore do one last carry chain,
+ // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+func feSquareGeneric(v, a *Element) {
+ l0 := a.l0
+ l1 := a.l1
+ l2 := a.l2
+ l3 := a.l3
+ l4 := a.l4
+
+ // Squaring works precisely like multiplication above, but thanks to its
+ // symmetry we get to group a few terms together.
+ //
+ // l4 l3 l2 l1 l0 x
+ // l4 l3 l2 l1 l0 =
+ // ------------------------
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l4l1 l3l1 l2l1 l1l1 l0l1 +
+ // l4l2 l3l2 l2l2 l1l2 l0l2 +
+ // l4l3 l3l3 l2l3 l1l3 l0l3 +
+ // l4l4 l3l4 l2l4 l1l4 l0l4 =
+ // ----------------------------------------------
+ // r8 r7 r6 r5 r4 r3 r2 r1 r0
+ //
+ // l4l0 l3l0 l2l0 l1l0 l0l0 +
+ // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
+ // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
+ // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
+ // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
+ // --------------------------------------
+ // r4 r3 r2 r1 r0
+ //
+ // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
+ // only three Mul64 and four Add64, instead of five and eight.
+
+ l0_2 := l0 * 2
+ l1_2 := l1 * 2
+
+ l1_38 := l1 * 38
+ l2_38 := l2 * 38
+ l3_38 := l3 * 38
+
+ l3_19 := l3 * 19
+ l4_19 := l4 * 19
+
+ // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
+ r0 := mul64(l0, l0)
+ r0 = addMul64(r0, l1_38, l4)
+ r0 = addMul64(r0, l2_38, l3)
+
+ // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
+ r1 := mul64(l0_2, l1)
+ r1 = addMul64(r1, l2_38, l4)
+ r1 = addMul64(r1, l3_19, l3)
+
+ // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
+ r2 := mul64(l0_2, l2)
+ r2 = addMul64(r2, l1, l1)
+ r2 = addMul64(r2, l3_38, l4)
+
+ // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
+ r3 := mul64(l0_2, l3)
+ r3 = addMul64(r3, l1_2, l2)
+ r3 = addMul64(r3, l4_19, l4)
+
+ // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
+ r4 := mul64(l0_2, l4)
+ r4 = addMul64(r4, l1_2, l3)
+ r4 = addMul64(r4, l2, l2)
+
+ c0 := shiftRightBy51(r0)
+ c1 := shiftRightBy51(r1)
+ c2 := shiftRightBy51(r2)
+ c3 := shiftRightBy51(r3)
+ c4 := shiftRightBy51(r4)
+
+ rr0 := r0.lo&maskLow51Bits + c4*19
+ rr1 := r1.lo&maskLow51Bits + c0
+ rr2 := r2.lo&maskLow51Bits + c1
+ rr3 := r3.lo&maskLow51Bits + c2
+ rr4 := r4.lo&maskLow51Bits + c3
+
+ *v = Element{rr0, rr1, rr2, rr3, rr4}
+ v.carryPropagate()
+}
+
+// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
+// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
+func (v *Element) carryPropagateGeneric() *Element {
+ c0 := v.l0 >> 51
+ c1 := v.l1 >> 51
+ c2 := v.l2 >> 51
+ c3 := v.l3 >> 51
+ c4 := v.l4 >> 51
+
+ // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
+ // the final l0 will be at most 52 bits. Similarly for the rest.
+ v.l0 = v.l0&maskLow51Bits + c4*19
+ v.l1 = v.l1&maskLow51Bits + c0
+ v.l2 = v.l2&maskLow51Bits + c1
+ v.l3 = v.l3&maskLow51Bits + c2
+ v.l4 = v.l4&maskLow51Bits + c3
+
+ return v
+}
diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go
new file mode 100644
index 000000000..3fd165387
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar.go
@@ -0,0 +1,343 @@
+// Copyright (c) 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// A Scalar is an integer modulo
+//
+// l = 2^252 + 27742317777372353535851937790883648493
+//
+// which is the prime order of the edwards25519 group.
+//
+// This type works similarly to math/big.Int, and all arguments and
+// receivers are allowed to alias.
+//
+// The zero value is a valid zero element.
+type Scalar struct {
+ // s is the scalar in the Montgomery domain, in the format of the
+ // fiat-crypto implementation.
+ s fiatScalarMontgomeryDomainFieldElement
+}
+
+// The field implementation in scalar_fiat.go is generated by the fiat-crypto
+// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
+// from a formally verified model.
+//
+// fiat-crypto code comes under the following license.
+//
+// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
+// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+// NewScalar returns a new zero Scalar.
+func NewScalar() *Scalar {
+ return &Scalar{}
+}
+
+// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
+// using Multiply and then Add.
+func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
+ // Make a copy of z in case it aliases s.
+ zCopy := new(Scalar).Set(z)
+ return s.Multiply(x, y).Add(s, zCopy)
+}
+
+// Add sets s = x + y mod l, and returns s.
+func (s *Scalar) Add(x, y *Scalar) *Scalar {
+ // s = 1 * x + y mod l
+ fiatScalarAdd(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Subtract sets s = x - y mod l, and returns s.
+func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
+ // s = -1 * y + x mod l
+ fiatScalarSub(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Negate sets s = -x mod l, and returns s.
+func (s *Scalar) Negate(x *Scalar) *Scalar {
+ // s = -1 * x + 0 mod l
+ fiatScalarOpp(&s.s, &x.s)
+ return s
+}
+
+// Multiply sets s = x * y mod l, and returns s.
+func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
+ // s = x * y + 0 mod l
+ fiatScalarMul(&s.s, &x.s, &y.s)
+ return s
+}
+
+// Set sets s = x, and returns s.
+func (s *Scalar) Set(x *Scalar) *Scalar {
+ *s = *x
+ return s
+}
+
+// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
+// If x is not of the right length, SetUniformBytes returns nil and an error,
+// and the receiver is unchanged.
+//
+// SetUniformBytes can be used to set s to a uniformly distributed value given
+// 64 uniformly distributed random bytes.
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
+ if len(x) != 64 {
+ return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
+ }
+
+ // We have a value x of 512 bits, but our fiatScalarFromBytes function
+ // expects an input lower than l, which is a little over 252 bits.
+ //
+ // Instead of writing a reduction function that operates on wider inputs, we
+ // can interpret x as the sum of three shorter values a, b, and c.
+ //
+ // x = a + b * 2^168 + c * 2^336 mod l
+ //
+ // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
+ // with two multiplications and two additions.
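+ //
+ // The three chunks below are 21, 21, and 22 bytes (168, 168, and 176
+ // bits), which together cover the full 512-bit input.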
+
+ s.setShortBytes(x[:21])
+ t := new(Scalar).setShortBytes(x[21:42])
+ s.Add(s, t.Multiply(t, scalarTwo168))
+ t.setShortBytes(x[42:])
+ s.Add(s, t.Multiply(t, scalarTwo336))
+
+ return s, nil
+}
+
+// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
+// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
+// in the 2^256 Montgomery domain.
+var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
+ 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
+var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
+ 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
+
+// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
+// than 32 bytes.
+func (s *Scalar) setShortBytes(x []byte) *Scalar {
+ if len(x) >= 32 {
+ panic("edwards25519: internal error: setShortBytes called with a long string")
+ }
+ var buf [32]byte
+ copy(buf[:], x)
+ fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+ return s
+}
+
+// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
+// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
+// returns nil and an error, and the receiver is unchanged.
+func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
+ if len(x) != 32 {
+ return nil, errors.New("invalid scalar length")
+ }
+ if !isReduced(x) {
+ return nil, errors.New("invalid scalar encoding")
+ }
+
+ fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
+ fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
+
+ return s, nil
+}
+
+// scalarMinusOneBytes is l - 1 in little endian.
+var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
+
+// isReduced returns whether the given scalar in 32-byte little endian encoded
+// form is reduced modulo l.
+func isReduced(s []byte) bool {
+ if len(s) != 32 {
+ return false
+ }
+
+ for i := len(s) - 1; i >= 0; i-- {
+ switch {
+ case s[i] > scalarMinusOneBytes[i]:
+ return false
+ case s[i] < scalarMinusOneBytes[i]:
+ return true
+ }
+ }
+ return true
+}
+
+// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
+// Section 5.1.5 (also known as clamping) and sets s to the result. The input
+// must be 32 bytes, and it is not modified. If x is not of the right length,
+// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
+//
+// Note that since Scalar values are always reduced modulo the prime order of
+// the curve, the resulting value will not preserve any of the cofactor-clearing
+// properties that clamping is meant to provide. It will however work as
+// expected as long as it is applied to points on the prime order subgroup, like
+// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
+// irrelevant RFC 7748 clamping, but it is now required for compatibility.
+func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
+ // The description above omits the purpose of the high bits of the clamping
+ // for brevity, but those are also lost to reductions, and are also
+ // irrelevant to edwards25519 as they protect against a specific
+ // implementation bug that was once observed in a generic Montgomery ladder.
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
+ }
+
+ // We need to use the wide reduction from SetUniformBytes, since clamping
+ // sets the 2^254 bit, making the value higher than the order.
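+ //
+ // Per RFC 8032, Section 5.1.5, the pruning below clears the three low
+ // bits of the first byte, clears bit 255, and sets bit 254.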
+ var wideBytes [64]byte
+ copy(wideBytes[:], x[:])
+ wideBytes[0] &= 248
+ wideBytes[31] &= 63
+ wideBytes[31] |= 64
+ return s.SetUniformBytes(wideBytes[:])
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of s.
+func (s *Scalar) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var encoded [32]byte
+ return s.bytes(&encoded)
+}
+
+func (s *Scalar) bytes(out *[32]byte) []byte {
+ var ss fiatScalarNonMontgomeryDomainFieldElement
+ fiatScalarFromMontgomery(&ss, &s.s)
+ fiatScalarToBytes(out, (*[4]uint64)(&ss))
+ return out[:]
+}
+
+// Equal returns 1 if s and t are equal, and 0 otherwise.
+func (s *Scalar) Equal(t *Scalar) int {
+ var diff fiatScalarMontgomeryDomainFieldElement
+ fiatScalarSub(&diff, &s.s, &t.s)
+ var nonzero uint64
+ fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
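+ // Fold every bit of nonzero into bit 0, so the masked complement below
+ // is 1 exactly when diff, and therefore s - t, is zero.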
+ nonzero |= nonzero >> 32
+ nonzero |= nonzero >> 16
+ nonzero |= nonzero >> 8
+ nonzero |= nonzero >> 4
+ nonzero |= nonzero >> 2
+ nonzero |= nonzero >> 1
+ return int(^nonzero) & 1
+}
+
+// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
+//
+// w must be between 2 and 8, or nonAdjacentForm will panic.
+func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
+ // This implementation is adapted from the one
+ // in curve25519-dalek and is documented there:
+ // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+ if w < 2 {
+ panic("w must be at least 2 by the definition of NAF")
+ } else if w > 8 {
+ panic("NAF digits must fit in int8")
+ }
+
+ var naf [256]int8
+ var digits [5]uint64
+
+ for i := 0; i < 4; i++ {
+ digits[i] = binary.LittleEndian.Uint64(b[i*8:])
+ }
+
+ width := uint64(1 << w)
+ windowMask := uint64(width - 1)
+
+ pos := uint(0)
+ carry := uint64(0)
+ for pos < 256 {
+ indexU64 := pos / 64
+ indexBit := pos % 64
+ var bitBuf uint64
+ if indexBit < 64-w {
+ // This window's bits are contained in a single u64
+ bitBuf = digits[indexU64] >> indexBit
+ } else {
+ // Combine the current 64 bits with bits from the next 64
+ bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
+ }
+
+ // Add carry into the current window
+ window := carry + (bitBuf & windowMask)
+
+ if window&1 == 0 {
+ // If the window value is even, preserve the carry and continue.
+ // Why is the carry preserved?
+ // If carry == 0 and window & 1 == 0,
+ // then the next carry should be 0
+ // If carry == 1 and window & 1 == 0,
+ // then bit_buf & 1 == 1 so the next carry should be 1
+ pos += 1
+ continue
+ }
+
+ if window < width/2 {
+ carry = 0
+ naf[pos] = int8(window)
+ } else {
+ carry = 1
+ naf[pos] = int8(window) - int8(width)
+ }
+
+ pos += w
+ }
+ return naf
+}
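+
+// For intuition: the width-2 NAF of 7 is the digit string (-1, 0, 0, 1), since
+// 7 = -1*2^0 + 1*2^3. Every nonzero digit produced above is odd and strictly
+// smaller than 2^(w-1) in magnitude, and any two nonzero digits are at least w
+// positions apart, which is what keeps the table lookups in the scalar
+// multiplication sparse.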
+
+func (s *Scalar) signedRadix16() [64]int8 {
+ b := s.Bytes()
+ if b[31] > 127 {
+ panic("scalar has high bit set illegally")
+ }
+
+ var digits [64]int8
+
+ // Compute unsigned radix-16 digits:
+ for i := 0; i < 32; i++ {
+ digits[2*i] = int8(b[i] & 15)
+ digits[2*i+1] = int8((b[i] >> 4) & 15)
+ }
+
+ // Recenter coefficients:
+ for i := 0; i < 63; i++ {
+ carry := (digits[i] + 8) >> 4
+ digits[i] -= carry << 4
+ digits[i+1] += carry
+ }
+
+ return digits
+}
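+
+// For intuition: a low byte of 0xff first yields the unsigned digits (15, 15);
+// recentering rewrites that as (-1, 0) with a carry of 1 into the next digit,
+// since 255 = -1*16^0 + 0*16^1 + 1*16^2, keeping every digit in the range
+// [-8, 8] accepted by the constant-time table lookups.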
diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go
new file mode 100644
index 000000000..2e5782b60
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalar_fiat.go
@@ -0,0 +1,1147 @@
+// Code generated by Fiat Cryptography. DO NOT EDIT.
+//
+// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
+//
+// curve description: Scalar
+//
+// machine_wordsize = 64 (from "64")
+//
+// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
+//
+// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
+//
+//
+//
+// NOTE: In addition to the bounds specified above each function, all
+//
+// functions synthesized for this Montgomery arithmetic require the
+//
+// input to be strictly less than the prime modulus (m), and also
+//
+// require the input to be in the unique saturated representation.
+//
+// All functions also ensure that these two properties are true of
+//
+// return values.
+//
+//
+//
+// Computed values:
+//
+// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
+//
+// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
+//
+// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
+//
+// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
+
+package edwards25519
+
+import "math/bits"
+
+type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+type fiatScalarInt1 int64 // We use int64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
+
+// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarMontgomeryDomainFieldElement [4]uint64
+
+// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
+//
+// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
+
+// fiatScalarCmovznzU64 is a single-word conditional move.
+//
+// Postconditions:
+//
+// out1 = (if arg1 = 0 then arg2 else arg3)
+//
+// Input Bounds:
+//
+// arg1: [0x0 ~> 0x1]
+// arg2: [0x0 ~> 0xffffffffffffffff]
+// arg3: [0x0 ~> 0xffffffffffffffff]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
+ x1 := (uint64(arg1) * 0xffffffffffffffff)
+ x2 := ((x1 & arg3) | ((^x1) & arg2))
+ *out1 = x2
+}
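+
+// For example, arg1 = 1 turns the multiplication above into the all-ones mask
+// 0xffffffffffffffff, so out1 = arg3; arg1 = 0 gives a zero mask and
+// out1 = arg2. The select is branchless, which keeps the generated field
+// arithmetic constant-time.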
+
+// fiatScalarMul multiplies two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, arg2[3])
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, arg2[2])
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, arg2[1])
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, arg2[0])
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ x19 := (uint64(fiatScalarUint1(x18)) + x6)
+ var x20 uint64
+ _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x20, 0x1000000000000000)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
+ var x26 uint64
+ var x27 uint64
+ x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
+ var x28 uint64
+ var x29 uint64
+ x28, x29 = bits.Add64(x27, x24, uint64(0x0))
+ x30 := (uint64(fiatScalarUint1(x29)) + x25)
+ var x32 uint64
+ _, x32 = bits.Add64(x11, x26, uint64(0x0))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
+ var x39 uint64
+ var x40 uint64
+ x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, arg2[3])
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, arg2[2])
+ var x45 uint64
+ var x46 uint64
+ x46, x45 = bits.Mul64(x1, arg2[1])
+ var x47 uint64
+ var x48 uint64
+ x48, x47 = bits.Mul64(x1, arg2[0])
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x48, x45, uint64(0x0))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
+ x55 := (uint64(fiatScalarUint1(x54)) + x42)
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x33, x47, uint64(0x0))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ x76 := (uint64(fiatScalarUint1(x75)) + x71)
+ var x78 uint64
+ _, x78 = bits.Add64(x56, x72, uint64(0x0))
+ var x79 uint64
+ var x80 uint64
+ x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
+ var x81 uint64
+ var x82 uint64
+ x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
+ var x83 uint64
+ var x84 uint64
+ x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
+ x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
+ var x88 uint64
+ var x89 uint64
+ x89, x88 = bits.Mul64(x2, arg2[3])
+ var x90 uint64
+ var x91 uint64
+ x91, x90 = bits.Mul64(x2, arg2[2])
+ var x92 uint64
+ var x93 uint64
+ x93, x92 = bits.Mul64(x2, arg2[1])
+ var x94 uint64
+ var x95 uint64
+ x95, x94 = bits.Mul64(x2, arg2[0])
+ var x96 uint64
+ var x97 uint64
+ x96, x97 = bits.Add64(x95, x92, uint64(0x0))
+ var x98 uint64
+ var x99 uint64
+ x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
+ var x100 uint64
+ var x101 uint64
+ x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
+ x102 := (uint64(fiatScalarUint1(x101)) + x89)
+ var x103 uint64
+ var x104 uint64
+ x103, x104 = bits.Add64(x79, x94, uint64(0x0))
+ var x105 uint64
+ var x106 uint64
+ x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
+ var x109 uint64
+ var x110 uint64
+ x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
+ var x115 uint64
+ var x116 uint64
+ x116, x115 = bits.Mul64(x113, 0x1000000000000000)
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
+ var x121 uint64
+ var x122 uint64
+ x121, x122 = bits.Add64(x120, x117, uint64(0x0))
+ x123 := (uint64(fiatScalarUint1(x122)) + x118)
+ var x125 uint64
+ _, x125 = bits.Add64(x103, x119, uint64(0x0))
+ var x126 uint64
+ var x127 uint64
+ x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
+ var x128 uint64
+ var x129 uint64
+ x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
+ var x130 uint64
+ var x131 uint64
+ x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
+ var x132 uint64
+ var x133 uint64
+ x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
+ x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
+ var x135 uint64
+ var x136 uint64
+ x136, x135 = bits.Mul64(x3, arg2[3])
+ var x137 uint64
+ var x138 uint64
+ x138, x137 = bits.Mul64(x3, arg2[2])
+ var x139 uint64
+ var x140 uint64
+ x140, x139 = bits.Mul64(x3, arg2[1])
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x3, arg2[0])
+ var x143 uint64
+ var x144 uint64
+ x143, x144 = bits.Add64(x142, x139, uint64(0x0))
+ var x145 uint64
+ var x146 uint64
+ x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
+ x149 := (uint64(fiatScalarUint1(x148)) + x136)
+ var x150 uint64
+ var x151 uint64
+ x150, x151 = bits.Add64(x126, x141, uint64(0x0))
+ var x152 uint64
+ var x153 uint64
+ x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
+ var x154 uint64
+ var x155 uint64
+ x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
+ var x156 uint64
+ var x157 uint64
+ x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
+ var x160 uint64
+ _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
+ var x162 uint64
+ var x163 uint64
+ x163, x162 = bits.Mul64(x160, 0x1000000000000000)
+ var x164 uint64
+ var x165 uint64
+ x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
+ var x166 uint64
+ var x167 uint64
+ x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
+ var x168 uint64
+ var x169 uint64
+ x168, x169 = bits.Add64(x167, x164, uint64(0x0))
+ x170 := (uint64(fiatScalarUint1(x169)) + x165)
+ var x172 uint64
+ _, x172 = bits.Add64(x150, x166, uint64(0x0))
+ var x173 uint64
+ var x174 uint64
+ x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
+ var x175 uint64
+ var x176 uint64
+ x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
+ var x177 uint64
+ var x178 uint64
+ x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
+ var x179 uint64
+ var x180 uint64
+ x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
+ x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
+ var x182 uint64
+ var x183 uint64
+ x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x184 uint64
+ var x185 uint64
+ x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
+ var x186 uint64
+ var x187 uint64
+ x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
+ var x188 uint64
+ var x189 uint64
+ x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
+ var x191 uint64
+ _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
+ var x192 uint64
+ fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
+ var x193 uint64
+ fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
+ var x194 uint64
+ fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
+ var x195 uint64
+ fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
+ out1[0] = x192
+ out1[1] = x193
+ out1[2] = x194
+ out1[3] = x195
+}
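+
+// For intuition: Montgomery-domain values are stored as a*R mod m with
+// R = 2^256. Each block above that multiplies a running low limb by the
+// precomputed constant 0xd2b51da312547e1b (-m^-1 mod 2^64) is one word-by-word
+// REDC step, so the result is (a*R)*(b*R)*R^-1 = (a*b)*R mod m and stays in
+// the Montgomery domain.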
+
+// fiatScalarAdd adds two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ var x10 uint64
+ x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x11 uint64
+ var x12 uint64
+ x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
+ var x18 uint64
+ _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
+ var x20 uint64
+ fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
+ var x21 uint64
+ fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
+ var x22 uint64
+ fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
+ out1[0] = x19
+ out1[1] = x20
+ out1[2] = x21
+ out1[3] = x22
+}
+
+// fiatScalarSub subtracts two field elements in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+// 0 ≤ eval arg2 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarOpp negates a field element in the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ var x1 uint64
+ var x2 uint64
+ x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
+ var x3 uint64
+ var x4 uint64
+ x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
+ var x5 uint64
+ var x6 uint64
+ x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
+ var x7 uint64
+ var x8 uint64
+ x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
+ var x9 uint64
+ fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
+ var x12 uint64
+ var x13 uint64
+ x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
+ out1[0] = x10
+ out1[1] = x12
+ out1[2] = x14
+ out1[3] = x16
+}
+
+// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [0x0 ~> 0xffffffffffffffff]
+func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
+ x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
+ *out1 = x1
+}
+
+// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
+// 0 ≤ eval out1 < m
+func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
+ x1 := arg1[0]
+ var x2 uint64
+ _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
+ var x4 uint64
+ var x5 uint64
+ x5, x4 = bits.Mul64(x2, 0x1000000000000000)
+ var x6 uint64
+ var x7 uint64
+ x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
+ var x8 uint64
+ var x9 uint64
+ x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
+ var x10 uint64
+ var x11 uint64
+ x10, x11 = bits.Add64(x9, x6, uint64(0x0))
+ var x13 uint64
+ _, x13 = bits.Add64(x1, x8, uint64(0x0))
+ var x14 uint64
+ var x15 uint64
+ x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
+ var x16 uint64
+ var x17 uint64
+ x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
+ var x18 uint64
+ _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
+ var x20 uint64
+ var x21 uint64
+ x21, x20 = bits.Mul64(x18, 0x1000000000000000)
+ var x22 uint64
+ var x23 uint64
+ x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
+ var x24 uint64
+ var x25 uint64
+ x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
+ var x26 uint64
+ var x27 uint64
+ x26, x27 = bits.Add64(x25, x22, uint64(0x0))
+ var x29 uint64
+ _, x29 = bits.Add64(x16, x24, uint64(0x0))
+ var x30 uint64
+ var x31 uint64
+ x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
+ var x32 uint64
+ var x33 uint64
+ x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
+ var x34 uint64
+ var x35 uint64
+ x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
+ var x36 uint64
+ var x37 uint64
+ x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
+ var x38 uint64
+ var x39 uint64
+ x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
+ var x40 uint64
+ var x41 uint64
+ x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
+ var x42 uint64
+ _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
+ var x44 uint64
+ var x45 uint64
+ x45, x44 = bits.Mul64(x42, 0x1000000000000000)
+ var x46 uint64
+ var x47 uint64
+ x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
+ var x48 uint64
+ var x49 uint64
+ x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
+ var x50 uint64
+ var x51 uint64
+ x50, x51 = bits.Add64(x49, x46, uint64(0x0))
+ var x53 uint64
+ _, x53 = bits.Add64(x36, x48, uint64(0x0))
+ var x54 uint64
+ var x55 uint64
+ x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
+ var x56 uint64
+ var x57 uint64
+ x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
+ var x58 uint64
+ var x59 uint64
+ x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
+ var x60 uint64
+ var x61 uint64
+ x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
+ var x62 uint64
+ var x63 uint64
+ x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
+ var x64 uint64
+ var x65 uint64
+ x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
+ var x66 uint64
+ _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
+ var x68 uint64
+ var x69 uint64
+ x69, x68 = bits.Mul64(x66, 0x1000000000000000)
+ var x70 uint64
+ var x71 uint64
+ x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
+ var x72 uint64
+ var x73 uint64
+ x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
+ var x74 uint64
+ var x75 uint64
+ x74, x75 = bits.Add64(x73, x70, uint64(0x0))
+ var x77 uint64
+ _, x77 = bits.Add64(x60, x72, uint64(0x0))
+ var x78 uint64
+ var x79 uint64
+ x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
+ var x80 uint64
+ var x81 uint64
+ x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
+ var x82 uint64
+ var x83 uint64
+ x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
+ x84 := (uint64(fiatScalarUint1(x83)) + x69)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
+ var x94 uint64
+ _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
+ var x96 uint64
+ fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
+ var x97 uint64
+ fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
+ var x98 uint64
+ fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
+ out1[0] = x95
+ out1[1] = x96
+ out1[2] = x97
+ out1[3] = x98
+}
+
+// fiatScalarToMontgomery translates a field element into the Montgomery domain.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// eval (from_montgomery out1) mod m = eval arg1 mod m
+// 0 ≤ eval out1 < m
+func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
+ x1 := arg1[1]
+ x2 := arg1[2]
+ x3 := arg1[3]
+ x4 := arg1[0]
+ var x5 uint64
+ var x6 uint64
+ x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
+ var x7 uint64
+ var x8 uint64
+ x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
+ var x9 uint64
+ var x10 uint64
+ x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
+ var x11 uint64
+ var x12 uint64
+ x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
+ var x13 uint64
+ var x14 uint64
+ x13, x14 = bits.Add64(x12, x9, uint64(0x0))
+ var x15 uint64
+ var x16 uint64
+ x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
+ var x17 uint64
+ var x18 uint64
+ x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
+ var x19 uint64
+ _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
+ var x21 uint64
+ var x22 uint64
+ x22, x21 = bits.Mul64(x19, 0x1000000000000000)
+ var x23 uint64
+ var x24 uint64
+ x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
+ var x25 uint64
+ var x26 uint64
+ x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
+ var x27 uint64
+ var x28 uint64
+ x27, x28 = bits.Add64(x26, x23, uint64(0x0))
+ var x30 uint64
+ _, x30 = bits.Add64(x11, x25, uint64(0x0))
+ var x31 uint64
+ var x32 uint64
+ x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
+ var x33 uint64
+ var x34 uint64
+ x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
+ var x35 uint64
+ var x36 uint64
+ x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
+ var x37 uint64
+ var x38 uint64
+ x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
+ var x39 uint64
+ var x40 uint64
+ x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
+ var x41 uint64
+ var x42 uint64
+ x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
+ var x43 uint64
+ var x44 uint64
+ x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
+ var x45 uint64
+ var x46 uint64
+ x45, x46 = bits.Add64(x44, x41, uint64(0x0))
+ var x47 uint64
+ var x48 uint64
+ x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
+ var x49 uint64
+ var x50 uint64
+ x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
+ var x51 uint64
+ var x52 uint64
+ x51, x52 = bits.Add64(x31, x43, uint64(0x0))
+ var x53 uint64
+ var x54 uint64
+ x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
+ var x55 uint64
+ var x56 uint64
+ x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
+ var x57 uint64
+ var x58 uint64
+ x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
+ var x59 uint64
+ _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
+ var x61 uint64
+ var x62 uint64
+ x62, x61 = bits.Mul64(x59, 0x1000000000000000)
+ var x63 uint64
+ var x64 uint64
+ x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
+ var x65 uint64
+ var x66 uint64
+ x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
+ var x67 uint64
+ var x68 uint64
+ x67, x68 = bits.Add64(x66, x63, uint64(0x0))
+ var x70 uint64
+ _, x70 = bits.Add64(x51, x65, uint64(0x0))
+ var x71 uint64
+ var x72 uint64
+ x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
+ var x73 uint64
+ var x74 uint64
+ x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
+ var x75 uint64
+ var x76 uint64
+ x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
+ var x77 uint64
+ var x78 uint64
+ x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
+ var x79 uint64
+ var x80 uint64
+ x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
+ var x81 uint64
+ var x82 uint64
+ x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
+ var x83 uint64
+ var x84 uint64
+ x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
+ var x85 uint64
+ var x86 uint64
+ x85, x86 = bits.Add64(x84, x81, uint64(0x0))
+ var x87 uint64
+ var x88 uint64
+ x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
+ var x89 uint64
+ var x90 uint64
+ x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
+ var x91 uint64
+ var x92 uint64
+ x91, x92 = bits.Add64(x71, x83, uint64(0x0))
+ var x93 uint64
+ var x94 uint64
+ x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
+ var x95 uint64
+ var x96 uint64
+ x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
+ var x97 uint64
+ var x98 uint64
+ x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
+ var x99 uint64
+ _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
+ var x101 uint64
+ var x102 uint64
+ x102, x101 = bits.Mul64(x99, 0x1000000000000000)
+ var x103 uint64
+ var x104 uint64
+ x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
+ var x105 uint64
+ var x106 uint64
+ x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
+ var x107 uint64
+ var x108 uint64
+ x107, x108 = bits.Add64(x106, x103, uint64(0x0))
+ var x110 uint64
+ _, x110 = bits.Add64(x91, x105, uint64(0x0))
+ var x111 uint64
+ var x112 uint64
+ x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
+ var x113 uint64
+ var x114 uint64
+ x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
+ var x115 uint64
+ var x116 uint64
+ x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
+ var x117 uint64
+ var x118 uint64
+ x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
+ var x119 uint64
+ var x120 uint64
+ x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
+ var x121 uint64
+ var x122 uint64
+ x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
+ var x123 uint64
+ var x124 uint64
+ x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
+ var x125 uint64
+ var x126 uint64
+ x125, x126 = bits.Add64(x124, x121, uint64(0x0))
+ var x127 uint64
+ var x128 uint64
+ x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
+ var x129 uint64
+ var x130 uint64
+ x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
+ var x131 uint64
+ var x132 uint64
+ x131, x132 = bits.Add64(x111, x123, uint64(0x0))
+ var x133 uint64
+ var x134 uint64
+ x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
+ var x135 uint64
+ var x136 uint64
+ x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
+ var x137 uint64
+ var x138 uint64
+ x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
+ var x139 uint64
+ _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
+ var x141 uint64
+ var x142 uint64
+ x142, x141 = bits.Mul64(x139, 0x1000000000000000)
+ var x143 uint64
+ var x144 uint64
+ x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
+ var x145 uint64
+ var x146 uint64
+ x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
+ var x147 uint64
+ var x148 uint64
+ x147, x148 = bits.Add64(x146, x143, uint64(0x0))
+ var x150 uint64
+ _, x150 = bits.Add64(x131, x145, uint64(0x0))
+ var x151 uint64
+ var x152 uint64
+ x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
+ var x153 uint64
+ var x154 uint64
+ x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
+ var x155 uint64
+ var x156 uint64
+ x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
+ x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
+ var x158 uint64
+ var x159 uint64
+ x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
+ var x160 uint64
+ var x161 uint64
+ x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
+ var x162 uint64
+ var x163 uint64
+ x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
+ var x164 uint64
+ var x165 uint64
+ x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
+ var x167 uint64
+ _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
+ var x168 uint64
+ fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
+ var x169 uint64
+ fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
+ var x170 uint64
+ fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
+ var x171 uint64
+ fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
+ out1[0] = x168
+ out1[1] = x169
+ out1[2] = x170
+ out1[3] = x171
+}
+
+// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ eval arg1 < m
+//
+// Postconditions:
+//
+// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
+ x1 := arg1[3]
+ x2 := arg1[2]
+ x3 := arg1[1]
+ x4 := arg1[0]
+ x5 := (uint8(x4) & 0xff)
+ x6 := (x4 >> 8)
+ x7 := (uint8(x6) & 0xff)
+ x8 := (x6 >> 8)
+ x9 := (uint8(x8) & 0xff)
+ x10 := (x8 >> 8)
+ x11 := (uint8(x10) & 0xff)
+ x12 := (x10 >> 8)
+ x13 := (uint8(x12) & 0xff)
+ x14 := (x12 >> 8)
+ x15 := (uint8(x14) & 0xff)
+ x16 := (x14 >> 8)
+ x17 := (uint8(x16) & 0xff)
+ x18 := uint8((x16 >> 8))
+ x19 := (uint8(x3) & 0xff)
+ x20 := (x3 >> 8)
+ x21 := (uint8(x20) & 0xff)
+ x22 := (x20 >> 8)
+ x23 := (uint8(x22) & 0xff)
+ x24 := (x22 >> 8)
+ x25 := (uint8(x24) & 0xff)
+ x26 := (x24 >> 8)
+ x27 := (uint8(x26) & 0xff)
+ x28 := (x26 >> 8)
+ x29 := (uint8(x28) & 0xff)
+ x30 := (x28 >> 8)
+ x31 := (uint8(x30) & 0xff)
+ x32 := uint8((x30 >> 8))
+ x33 := (uint8(x2) & 0xff)
+ x34 := (x2 >> 8)
+ x35 := (uint8(x34) & 0xff)
+ x36 := (x34 >> 8)
+ x37 := (uint8(x36) & 0xff)
+ x38 := (x36 >> 8)
+ x39 := (uint8(x38) & 0xff)
+ x40 := (x38 >> 8)
+ x41 := (uint8(x40) & 0xff)
+ x42 := (x40 >> 8)
+ x43 := (uint8(x42) & 0xff)
+ x44 := (x42 >> 8)
+ x45 := (uint8(x44) & 0xff)
+ x46 := uint8((x44 >> 8))
+ x47 := (uint8(x1) & 0xff)
+ x48 := (x1 >> 8)
+ x49 := (uint8(x48) & 0xff)
+ x50 := (x48 >> 8)
+ x51 := (uint8(x50) & 0xff)
+ x52 := (x50 >> 8)
+ x53 := (uint8(x52) & 0xff)
+ x54 := (x52 >> 8)
+ x55 := (uint8(x54) & 0xff)
+ x56 := (x54 >> 8)
+ x57 := (uint8(x56) & 0xff)
+ x58 := (x56 >> 8)
+ x59 := (uint8(x58) & 0xff)
+ x60 := uint8((x58 >> 8))
+ out1[0] = x5
+ out1[1] = x7
+ out1[2] = x9
+ out1[3] = x11
+ out1[4] = x13
+ out1[5] = x15
+ out1[6] = x17
+ out1[7] = x18
+ out1[8] = x19
+ out1[9] = x21
+ out1[10] = x23
+ out1[11] = x25
+ out1[12] = x27
+ out1[13] = x29
+ out1[14] = x31
+ out1[15] = x32
+ out1[16] = x33
+ out1[17] = x35
+ out1[18] = x37
+ out1[19] = x39
+ out1[20] = x41
+ out1[21] = x43
+ out1[22] = x45
+ out1[23] = x46
+ out1[24] = x47
+ out1[25] = x49
+ out1[26] = x51
+ out1[27] = x53
+ out1[28] = x55
+ out1[29] = x57
+ out1[30] = x59
+ out1[31] = x60
+}
+
+// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
+//
+// Preconditions:
+//
+// 0 ≤ bytes_eval arg1 < m
+//
+// Postconditions:
+//
+// eval out1 mod m = bytes_eval arg1 mod m
+// 0 ≤ eval out1 < m
+//
+// Input Bounds:
+//
+// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
+//
+// Output Bounds:
+//
+// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
+func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
+ x1 := (uint64(arg1[31]) << 56)
+ x2 := (uint64(arg1[30]) << 48)
+ x3 := (uint64(arg1[29]) << 40)
+ x4 := (uint64(arg1[28]) << 32)
+ x5 := (uint64(arg1[27]) << 24)
+ x6 := (uint64(arg1[26]) << 16)
+ x7 := (uint64(arg1[25]) << 8)
+ x8 := arg1[24]
+ x9 := (uint64(arg1[23]) << 56)
+ x10 := (uint64(arg1[22]) << 48)
+ x11 := (uint64(arg1[21]) << 40)
+ x12 := (uint64(arg1[20]) << 32)
+ x13 := (uint64(arg1[19]) << 24)
+ x14 := (uint64(arg1[18]) << 16)
+ x15 := (uint64(arg1[17]) << 8)
+ x16 := arg1[16]
+ x17 := (uint64(arg1[15]) << 56)
+ x18 := (uint64(arg1[14]) << 48)
+ x19 := (uint64(arg1[13]) << 40)
+ x20 := (uint64(arg1[12]) << 32)
+ x21 := (uint64(arg1[11]) << 24)
+ x22 := (uint64(arg1[10]) << 16)
+ x23 := (uint64(arg1[9]) << 8)
+ x24 := arg1[8]
+ x25 := (uint64(arg1[7]) << 56)
+ x26 := (uint64(arg1[6]) << 48)
+ x27 := (uint64(arg1[5]) << 40)
+ x28 := (uint64(arg1[4]) << 32)
+ x29 := (uint64(arg1[3]) << 24)
+ x30 := (uint64(arg1[2]) << 16)
+ x31 := (uint64(arg1[1]) << 8)
+ x32 := arg1[0]
+ x33 := (x31 + uint64(x32))
+ x34 := (x30 + x33)
+ x35 := (x29 + x34)
+ x36 := (x28 + x35)
+ x37 := (x27 + x36)
+ x38 := (x26 + x37)
+ x39 := (x25 + x38)
+ x40 := (x23 + uint64(x24))
+ x41 := (x22 + x40)
+ x42 := (x21 + x41)
+ x43 := (x20 + x42)
+ x44 := (x19 + x43)
+ x45 := (x18 + x44)
+ x46 := (x17 + x45)
+ x47 := (x15 + uint64(x16))
+ x48 := (x14 + x47)
+ x49 := (x13 + x48)
+ x50 := (x12 + x49)
+ x51 := (x11 + x50)
+ x52 := (x10 + x51)
+ x53 := (x9 + x52)
+ x54 := (x7 + uint64(x8))
+ x55 := (x6 + x54)
+ x56 := (x5 + x55)
+ x57 := (x4 + x56)
+ x58 := (x3 + x57)
+ x59 := (x2 + x58)
+ x60 := (x1 + x59)
+ out1[0] = x39
+ out1[1] = x46
+ out1[2] = x53
+ out1[3] = x60
+}
diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go
new file mode 100644
index 000000000..f7ca3cef9
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/scalarmult.go
@@ -0,0 +1,214 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import "sync"
+
+// basepointTable is a set of 32 affineLookupTables, where table i is generated
+// from 256^i * basepoint. It is precomputed the first time it's used.
+func basepointTable() *[32]affineLookupTable {
+ basepointTablePrecomp.initOnce.Do(func() {
+ p := NewGeneratorPoint()
+ for i := 0; i < 32; i++ {
+ basepointTablePrecomp.table[i].FromP3(p)
+ for j := 0; j < 8; j++ {
+ p.Add(p, p)
+ }
+ }
+ })
+ return &basepointTablePrecomp.table
+}
+
+var basepointTablePrecomp struct {
+ table [32]affineLookupTable
+ initOnce sync.Once
+}
+
+// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
+// returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarBaseMult(x *Scalar) *Point {
+ basepointTable := basepointTable()
+
+ // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
+ // as described in the Ed25519 paper
+ //
+ // Group even and odd coefficients
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
+ // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
+ // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
+ //
+ // We use a lookup table for each i to get x_i*16^(2*i)*B
+ // and do four doublings to multiply by 16.
+ digits := x.signedRadix16()
+
+ multiple := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+
+ // Accumulate the odd components first
+ v.Set(NewIdentityPoint())
+ for i := 1; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ // Multiply by 16
+ tmp2.FromP3(v) // tmp2 = v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
+ v.fromP1xP1(tmp1) // now v = 16*(odd components)
+
+ // Accumulate the even components
+ for i := 0; i < 64; i += 2 {
+ basepointTable[i/2].SelectInto(multiple, digits[i])
+ tmp1.AddAffine(v, multiple)
+ v.fromP1xP1(tmp1)
+ }
+
+ return v
+}
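+
+// A minimal usage sketch (s is any Scalar; the names are illustrative):
+//
+//	A := new(Point).ScalarBaseMult(s) // A = s*B, computed in constant time
+//	// A.Equal(new(Point).ScalarMult(s, NewGeneratorPoint())) == 1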
+
+// ScalarMult sets v = x * q, and returns v.
+//
+// The scalar multiplication is done in constant time.
+func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
+ checkInitialized(q)
+
+ var table projLookupTable
+ table.FromP3(q)
+
+ // Write x = sum(x_i * 16^i)
+ // so x*Q = sum( Q*x_i*16^i )
+ // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
+ // <------compute inside out---------
+ //
+ // We use the lookup table to get the x_i*Q values
+ // and do four doublings to compute 16*Q
+ digits := x.signedRadix16()
+
+ // Unwrap first loop iteration to save computing 16*identity
+ multiple := &projCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ table.SelectInto(multiple, digits[63])
+
+ v.Set(NewIdentityPoint())
+ tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
+ for i := 62; i >= 0; i-- {
+ tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
+ tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
+ tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
+ v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
+ table.SelectInto(multiple, digits[i])
+ tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
+ }
+ v.fromP1xP1(tmp1)
+ return v
+}
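+
+// For intuition: with radix-16 digits x = x_0 + 16*x_1 + ... + 16^63*x_63, the
+// loop above evaluates x*Q as ((...(x_63*Q)*16 + x_62*Q)*16 + ...)*16 + x_0*Q,
+// i.e. Horner's rule with the four doublings standing in for each
+// multiplication by 16.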
+
+// basepointNafTable is the nafLookupTable8 for the basepoint.
+// It is precomputed the first time it's used.
+func basepointNafTable() *nafLookupTable8 {
+ basepointNafTablePrecomp.initOnce.Do(func() {
+ basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
+ })
+ return &basepointNafTablePrecomp.table
+}
+
+var basepointNafTablePrecomp struct {
+ table nafLookupTable8
+ initOnce sync.Once
+}
+
+// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
+// generator, and returns v.
+//
+// Execution time depends on the inputs.
+func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
+ checkInitialized(A)
+
+ // Similarly to the single variable-base approach, we compute
+ // digits and use them with a lookup table. However, because
+ // we are allowed to do variable-time operations, we don't
+ // need constant-time lookups or constant-time digit
+ // computations.
+ //
+ // So we use a non-adjacent form of some width w instead of
+ // radix 16. This is like a binary representation (one digit
+ // for each binary place) but we allow the digits to grow in
+ // magnitude up to 2^{w-1} so that the nonzero digits are as
+ // sparse as possible. Intuitively, this "condenses" the
+ // "mass" of the scalar onto sparse coefficients (meaning
+ // fewer additions).
+
+ basepointNafTable := basepointNafTable()
+ var aTable nafLookupTable5
+ aTable.FromP3(A)
+ // Because the basepoint is fixed, we can use a wider NAF
+ // corresponding to a bigger table.
+ aNaf := a.nonAdjacentForm(5)
+ bNaf := b.nonAdjacentForm(8)
+
+ // Find the first nonzero coefficient.
+ i := 255
+ for j := i; j >= 0; j-- {
+ if aNaf[j] != 0 || bNaf[j] != 0 {
+ break
+ }
+ }
+
+ multA := &projCached{}
+ multB := &affineCached{}
+ tmp1 := &projP1xP1{}
+ tmp2 := &projP2{}
+ tmp2.Zero()
+
+ // Move from high to low bits, doubling the accumulator
+ // at each iteration and checking whether there is a nonzero
+ // coefficient to look up a multiple of.
+ for ; i >= 0; i-- {
+ tmp1.Double(tmp2)
+
+ // Only update v if we have a nonzero coeff to add in.
+ if aNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, aNaf[i])
+ tmp1.Add(v, multA)
+ } else if aNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ aTable.SelectInto(multA, -aNaf[i])
+ tmp1.Sub(v, multA)
+ }
+
+ if bNaf[i] > 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, bNaf[i])
+ tmp1.AddAffine(v, multB)
+ } else if bNaf[i] < 0 {
+ v.fromP1xP1(tmp1)
+ basepointNafTable.SelectInto(multB, -bNaf[i])
+ tmp1.SubAffine(v, multB)
+ }
+
+ tmp2.FromP1xP1(tmp1)
+ }
+
+ v.fromP2(tmp2)
+ return v
+}
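+
+// A minimal sketch of the usual reason this function exists: an Ed25519-style
+// verification check R == s*B - h*A can be phrased as one call (A, R, h and s
+// are illustrative names for the public-key point, the signature point, the
+// challenge scalar and the response scalar):
+//
+//	minusA := new(Point).Negate(A)
+//	check := new(Point).VarTimeDoubleScalarBaseMult(h, minusA, s) // h*(-A) + s*B
+//	ok := check.Equal(R) == 1
+//
+// Variable-time execution is acceptable here because everything involved is
+// public.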
diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go
new file mode 100644
index 000000000..83234bbc0
--- /dev/null
+++ b/vendor/filippo.io/edwards25519/tables.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+import (
+ "crypto/subtle"
+)
+
+// A dynamic lookup table for variable-base, constant-time scalar muls.
+type projLookupTable struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, constant-time scalar muls.
+type affineLookupTable struct {
+ points [8]affineCached
+}
+
+// A dynamic lookup table for variable-base, variable-time scalar muls.
+type nafLookupTable5 struct {
+ points [8]projCached
+}
+
+// A precomputed lookup table for fixed-base, variable-time scalar muls.
+type nafLookupTable8 struct {
+ points [64]affineCached
+}
+
+// Constructors.
+
+// Builds a lookup table at runtime. Fast.
+func (v *projLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
+ // This is needlessly complicated because the API has explicit
+ // receivers instead of creating stack objects and relying on RVO
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *affineLookupTable) FromP3(q *Point) {
+ // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
+ // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
+ v.points[0].FromP3(q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
+ }
+}
+
+// Builds a lookup table at runtime. Fast.
+func (v *nafLookupTable5) FromP3(q *Point) {
+ // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
+ // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 7; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
+ }
+}
+
+// This is not optimised for speed; fixed-base tables should be precomputed.
+func (v *nafLookupTable8) FromP3(q *Point) {
+ v.points[0].FromP3(q)
+ q2 := Point{}
+ q2.Add(q, q)
+ tmpP3 := Point{}
+ tmpP1xP1 := projP1xP1{}
+ for i := 0; i < 63; i++ {
+ v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
+ }
+}
+
+// Selectors.
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
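+
+// For example, x = -3 gives xmask = -1 (all ones) and xabs = (-3 + -1) ^ -1 = 3,
+// so the scan sets dest to 3*Q and CondNeg flips it to -3*Q because
+// xmask & 1 == 1, all without a secret-dependent branch or index.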
+
+// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
+ // Compute xabs = |x|
+ xmask := x >> 7
+ xabs := uint8((x + xmask) ^ xmask)
+
+ dest.Zero()
+ for j := 1; j <= 8; j++ {
+ // Set dest = j*Q if |x| = j
+ cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
+ dest.Select(&v.points[j-1], dest, cond)
+ }
+ // Now dest = |x|*Q, conditionally negate to get x*Q
+ dest.CondNeg(int(xmask & 1))
+}
+
+// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
+func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
+ *dest = v.points[x/2]
+}
+
+// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
+func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
+ *dest = v.points[x/2]
+}
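+
+// For example, x = 7 selects v.points[3], which FromP3 above filled with
+// (2*3+1)*Q = 7*Q; the NAF digits being odd is what makes the simple x/2
+// indexing valid.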
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
index bb83c6670..17c4d0a71 100644
--- a/vendor/github.com/asaskevich/govalidator/.travis.yml
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -1,12 +1,18 @@
+dist: bionic
language: go
-dist: xenial
+env: GO111MODULE=on GOFLAGS='-mod vendor'
+install: true
+email: false
+
go:
- - '1.10'
- - '1.11'
- - '1.12'
- - '1.13'
- - 'tip'
+ - 1.10
+ - 1.11
+ - 1.12
+ - 1.13
+ - tip
+before_script:
+ - go install github.com/golangci/golangci-lint/cmd/golangci-lint
script:
- - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
- - bash <(curl -s https://codecov.io/bash)
+ - golangci-lint run # run a bunch of code checkers/linters in parallel
+ - go test -v -race ./... # Run all the tests with the race detector enabled
diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
deleted file mode 100644
index 4b462b0d8..000000000
--- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Contributor Code of Conduct
-
-This project adheres to [The Code Manifesto](http://codemanifesto.com)
-as its guidelines for contributor interactions.
-
-## The Code Manifesto
-
-We want to work in an ecosystem that empowers developers to reach their
-potential — one that encourages growth and effective collaboration. A space
-that is safe for all.
-
-A space such as this benefits everyone that participates in it. It encourages
-new developers to enter our field. It is through discussion and collaboration
-that we grow, and through growth that we improve.
-
-In the effort to create such a place, we hold to these values:
-
-1. **Discrimination limits us.** This includes discrimination on the basis of
- race, gender, sexual orientation, gender identity, age, nationality,
- technology and any other arbitrary exclusion of a group of people.
-2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
- levels. Remember that, and if brought to your attention, heed it.
-3. **We are our biggest assets.** None of us were born masters of our trade.
- Each of us has been helped along the way. Return that favor, when and where
- you can.
-4. **We are resources for the future.** As an extension of #3, share what you
- know. Make yourself a resource to help those that come after you.
-5. **Respect defines us.** Treat others as you wish to be treated. Make your
- discussions, criticisms and debates from a position of respectfulness. Ask
- yourself, is it true? Is it necessary? Is it constructive? Anything less is
- unacceptable.
-6. **Reactions require grace.** Angry responses are valid, but abusive language
- and vindictive actions are toxic. When something happens that offends you,
- handle it assertively, but be respectful. Escalate reasonably, and try to
- allow the offender an opportunity to explain themselves, and possibly
- correct the issue.
-7. **Opinions are just that: opinions.** Each and every one of us, due to our
- background and upbringing, have varying opinions. That is perfectly
- acceptable. Remember this: if you respect your own opinions, you should
- respect the opinions of others.
-8. **To err is human.** You might not intend it, but mistakes do happen and
- contribute to build experience. Tolerate honest mistakes, and don't
- hesitate to apologize if you make one yourself.
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
index cacba9102..2f9a31fad 100644
--- a/vendor/github.com/asaskevich/govalidator/LICENSE
+++ b/vendor/github.com/asaskevich/govalidator/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2014-2020 Alex Saskevich
+Copyright (c) 2014 Alex Saskevich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
index 2c3fc35eb..78f999e83 100644
--- a/vendor/github.com/asaskevich/govalidator/README.md
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -1,8 +1,7 @@
govalidator
===========
-[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator)
-[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator)
-[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
@@ -177,7 +176,6 @@ func IsPrintableASCII(str string) bool
func IsRFC3339(str string) bool
func IsRFC3339WithoutZone(str string) bool
func IsRGBcolor(str string) bool
-func IsRegex(str string) bool
func IsRequestURI(rawurl string) bool
func IsRequestURL(rawurl string) bool
func IsRipeMD128(str string) bool
@@ -204,7 +202,6 @@ func IsUUID(str string) bool
func IsUUIDv3(str string) bool
func IsUUIDv4(str string) bool
func IsUUIDv5(str string) bool
-func IsULID(str string) bool
func IsUnixTime(str string) bool
func IsUpperCase(str string) bool
func IsVariableWidth(str string) bool
@@ -282,7 +279,7 @@ type User struct {
Age int `valid:"type(int)"`
Meta interface{} `valid:"type(string)"`
}
-result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
+result, err := govalidator.ValidateStruct(user{"Bob", 20, "meta"})
if err != nil {
println("error: " + err.Error())
}
@@ -384,7 +381,6 @@ Here is a list of available validators for struct fields (validator - used funct
"rfc3339WithoutZone": IsRFC3339WithoutZone,
"ISO3166Alpha2": IsISO3166Alpha2,
"ISO3166Alpha3": IsISO3166Alpha3,
-"ulid": IsULID,
```
Validators with parameters
@@ -396,8 +392,6 @@ Validators with parameters
"matches(pattern)": StringMatches,
"in(string1|string2|...|stringN)": IsIn,
"rsapub(keylength)" : IsRsaPub,
-"minstringlength(int): MinStringLength,
-"maxstringlength(int): MaxStringLength,
```
Validators with parameters for any type
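The README hunks above revert to the pinned release's example, which constructs the struct with a lowercase user identifier even though the type is declared as User (an upstream inconsistency the downgrade carries along). For reference, a self-contained sketch of that example against the pinned govalidator API; the field values are the README's own:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// User mirrors the README example; the valid tags resolve to IsType in the
// pinned release's tag maps.
type User struct {
	Name string      `valid:"type(string)"`
	Age  int         `valid:"type(int)"`
	Meta interface{} `valid:"type(string)"`
}

func main() {
	ok, err := govalidator.ValidateStruct(User{Name: "Bob", Age: 20, Meta: "meta"})
	if err != nil {
		fmt.Println("error: " + err.Error())
	}
	fmt.Println(ok) // true: every field matches its declared type
}
```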
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
index 3e1da7cb4..5bace2654 100644
--- a/vendor/github.com/asaskevich/govalidator/arrays.go
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -9,35 +9,6 @@ type ResultIterator func(interface{}, int) interface{}
// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
type ConditionIterator func(interface{}, int) bool
-// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values
-type ReduceIterator func(interface{}, interface{}) interface{}
-
-// Some validates that any item of array corresponds to ConditionIterator. Returns boolean.
-func Some(array []interface{}, iterator ConditionIterator) bool {
- res := false
- for index, data := range array {
- res = res || iterator(data, index)
- }
- return res
-}
-
-// Every validates that every item of array corresponds to ConditionIterator. Returns boolean.
-func Every(array []interface{}, iterator ConditionIterator) bool {
- res := true
- for index, data := range array {
- res = res && iterator(data, index)
- }
- return res
-}
-
-// Reduce boils down a list of values into a single value by ReduceIterator
-func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
- for _, data := range array {
- initialValue = iterator(initialValue, data)
- }
- return initialValue
-}
-
// Each iterates over the slice and apply Iterator to every item
func Each(array []interface{}, iterator Iterator) {
for index, data := range array {
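The pinned arrays.go drops Some, Every, and Reduce, while helpers such as Each, Filter, and Count remain. A quick sketch, with made-up values, of covering the removed predicates using what is left:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	data := []interface{}{1, 2, 3, 4, 5}
	isEven := func(v interface{}, _ int) bool { return v.(int)%2 == 0 }

	// "Some": at least one element satisfies the predicate.
	some := govalidator.Count(data, isEven) > 0
	// "Every": all elements satisfy the predicate.
	every := govalidator.Count(data, isEven) == len(data)

	fmt.Println(govalidator.Filter(data, isEven)) // [2 4]
	fmt.Println(some, every)                      // true false
}
```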
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
index d68e990fc..cf1e5d569 100644
--- a/vendor/github.com/asaskevich/govalidator/converter.go
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -10,7 +10,7 @@ import (
// ToString convert the input to a string.
func ToString(obj interface{}) string {
res := fmt.Sprintf("%v", obj)
- return res
+ return string(res)
}
// ToJSON convert the input to a valid JSON string
@@ -23,27 +23,12 @@ func ToJSON(obj interface{}) (string, error) {
}
// ToFloat convert the input string to a float, or 0.0 if the input is not a float.
-func ToFloat(value interface{}) (res float64, err error) {
- val := reflect.ValueOf(value)
-
- switch value.(type) {
- case int, int8, int16, int32, int64:
- res = float64(val.Int())
- case uint, uint8, uint16, uint32, uint64:
- res = float64(val.Uint())
- case float32, float64:
- res = val.Float()
- case string:
- res, err = strconv.ParseFloat(val.String(), 64)
- if err != nil {
- res = 0
- }
- default:
- err = fmt.Errorf("ToInt: unknown interface type %T", value)
- res = 0
+func ToFloat(str string) (float64, error) {
+ res, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ res = 0.0
}
-
- return
+ return res, err
}
// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
@@ -55,8 +40,6 @@ func ToInt(value interface{}) (res int64, err error) {
res = val.Int()
case uint, uint8, uint16, uint32, uint64:
res = int64(val.Uint())
- case float32, float64:
- res = int64(val.Float())
case string:
if IsInt(val.String()) {
res, err = strconv.ParseInt(val.String(), 0, 64)
@@ -64,11 +47,11 @@ func ToInt(value interface{}) (res int64, err error) {
res = 0
}
} else {
- err = fmt.Errorf("ToInt: invalid numeric format %g", value)
+ err = fmt.Errorf("math: square root of negative number %g", value)
res = 0
}
default:
- err = fmt.Errorf("ToInt: unknown interface type %T", value)
+ err = fmt.Errorf("math: square root of negative number %g", value)
res = 0
}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
index 5041d9e86..7e6c652e1 100644
--- a/vendor/github.com/asaskevich/govalidator/numerics.go
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -2,6 +2,7 @@ package govalidator
import (
"math"
+ "reflect"
)
// Abs returns absolute value of number
@@ -40,7 +41,7 @@ func IsNonPositive(value float64) bool {
return value <= 0
}
-// InRangeInt returns true if value lies between left and right border
+// InRange returns true if value lies between left and right border
func InRangeInt(value, left, right interface{}) bool {
value64, _ := ToInt(value)
left64, _ := ToInt(left)
@@ -51,7 +52,7 @@ func InRangeInt(value, left, right interface{}) bool {
return value64 >= left64 && value64 <= right64
}
-// InRangeFloat32 returns true if value lies between left and right border
+// InRange returns true if value lies between left and right border
func InRangeFloat32(value, left, right float32) bool {
if left > right {
left, right = right, left
@@ -59,7 +60,7 @@ func InRangeFloat32(value, left, right float32) bool {
return value >= left && value <= right
}
-// InRangeFloat64 returns true if value lies between left and right border
+// InRange returns true if value lies between left and right border
func InRangeFloat64(value, left, right float64) bool {
if left > right {
left, right = right, left
@@ -67,24 +68,20 @@ func InRangeFloat64(value, left, right float64) bool {
return value >= left && value <= right
}
-// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string.
-// All types must the same type.
-// False if value doesn't lie in range or if it incompatible or not comparable
+// InRange returns true if value lies between left and right border, generic type to handle int, float32 or float64, all types must the same type
func InRange(value interface{}, left interface{}, right interface{}) bool {
- switch value.(type) {
- case int:
- intValue, _ := ToInt(value)
- intLeft, _ := ToInt(left)
- intRight, _ := ToInt(right)
- return InRangeInt(intValue, intLeft, intRight)
- case float32, float64:
- intValue, _ := ToFloat(value)
- intLeft, _ := ToFloat(left)
- intRight, _ := ToFloat(right)
- return InRangeFloat64(intValue, intLeft, intRight)
- case string:
- return value.(string) >= left.(string) && value.(string) <= right.(string)
- default:
+
+ reflectValue := reflect.TypeOf(value).Kind()
+ reflectLeft := reflect.TypeOf(left).Kind()
+ reflectRight := reflect.TypeOf(right).Kind()
+
+ if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+ return InRangeInt(value.(int), left.(int), right.(int))
+ } else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+ return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+ } else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+ return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+ } else {
return false
}
}
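The pinned InRange dispatches on reflect.Kind and requires all three arguments to share one concrete type (int, float32, or float64); mixed types and strings fall through to false. An illustrative check:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.InRange(5, 1, 10))       // true: all int
	fmt.Println(govalidator.InRange(5.0, 1.0, 10.0)) // true: all float64
	fmt.Println(govalidator.InRange(5, 1.0, 10.0))   // false: mixed int/float64
	fmt.Println(govalidator.InRange("b", "a", "c"))  // false: strings not handled
}
```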
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
index bafc3765e..e55451cff 100644
--- a/vendor/github.com/asaskevich/govalidator/patterns.go
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -38,12 +38,10 @@ const (
URLPort string = `(:(\d{1,5}))`
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
- URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
+ URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^(/[^/\x00]*)+/?$`
- WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
- UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
tagName string = "valid"
hasLowerCase string = ".*[[:lower:]]"
@@ -51,8 +49,6 @@ const (
hasWhitespace string = ".*[[:space:]]"
hasWhitespaceOnly string = "^[[:space:]]+$"
IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
- IMSI string = "^\\d{14,15}$"
- E164 string = `^\+?[1-9]\d{1,14}$`
)
// Used by IsFilePath func
@@ -100,14 +96,10 @@ var (
rxSSN = regexp.MustCompile(SSN)
rxWinPath = regexp.MustCompile(WinPath)
rxUnixPath = regexp.MustCompile(UnixPath)
- rxARWinPath = regexp.MustCompile(WinARPath)
- rxARUnixPath = regexp.MustCompile(UnixARPath)
rxSemver = regexp.MustCompile(Semver)
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
rxHasWhitespace = regexp.MustCompile(hasWhitespace)
rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
rxIMEI = regexp.MustCompile(IMEI)
- rxIMSI = regexp.MustCompile(IMSI)
- rxE164 = regexp.MustCompile(E164)
)
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
index c573abb51..b57b666f5 100644
--- a/vendor/github.com/asaskevich/govalidator/types.go
+++ b/vendor/github.com/asaskevich/govalidator/types.go
@@ -14,10 +14,8 @@ type Validator func(str string) bool
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
type CustomTypeValidator func(i interface{}, o interface{}) bool
-// ParamValidator is a wrapper for validator functions that accept additional parameters.
+// ParamValidator is a wrapper for validator functions that accepts additional parameters.
type ParamValidator func(str string, params ...string) bool
-
-// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value
type InterfaceParamValidator func(in interface{}, params ...string) bool
type tagOptionsMap map[string]tagOption
@@ -74,13 +72,13 @@ var ParamTagMap = map[string]ParamValidator{
// ParamTagRegexMap maps param tags to their respective regexes.
var ParamTagRegexMap = map[string]*regexp.Regexp{
- "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
- "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
- "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
- "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
- "in": regexp.MustCompile(`^in\((.*)\)`),
- "matches": regexp.MustCompile(`^matches\((.+)\)$`),
- "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+ "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+ "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+ "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+ "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+ "in": regexp.MustCompile(`^in\((.*)\)`),
+ "matches": regexp.MustCompile(`^matches\((.+)\)$`),
+ "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
"minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
"maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
}
@@ -165,7 +163,6 @@ var TagMap = map[string]Validator{
"ISO3166Alpha3": IsISO3166Alpha3,
"ISO4217": IsISO4217,
"IMEI": IsIMEI,
- "ulid": IsULID,
}
// ISO3166Entry stores country codes
@@ -450,10 +447,10 @@ var ISO4217List = []string{
"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
"QAR",
"RON", "RSD", "RUB", "RWF",
- "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
+ "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL",
"THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
- "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
- "VEF", "VES", "VND", "VUV",
+ "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS",
+ "VEF", "VND", "VUV",
"WST",
"XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
"YER",
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
index 46ecfc84a..298f9920d 100644
--- a/vendor/github.com/asaskevich/govalidator/validator.go
+++ b/vendor/github.com/asaskevich/govalidator/validator.go
@@ -32,7 +32,7 @@ var (
const maxURLRuneCount = 2083
const minURLRuneCount = 3
-const rfc3339WithoutZone = "2006-01-02T15:04:05"
+const RF3339WithoutZone = "2006-01-02T15:04:05"
// SetFieldsRequiredByDefault causes validation to fail when struct fields
// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
@@ -63,13 +63,13 @@ func SetNilPtrAllowedByRequired(value bool) {
nilPtrAllowedByRequired = value
}
-// IsEmail checks if the string is an email.
+// IsEmail check if the string is an email.
func IsEmail(str string) bool {
// TODO uppercase letters are not supported
return rxEmail.MatchString(str)
}
-// IsExistingEmail checks if the string is an email of existing domain
+// IsExistingEmail check if the string is an email of existing domain
func IsExistingEmail(email string) bool {
if len(email) < 6 || len(email) > 254 {
@@ -84,13 +84,13 @@ func IsExistingEmail(email string) bool {
if len(user) > 64 {
return false
}
+ if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+ return false
+ }
switch host {
case "localhost", "example.com":
return true
}
- if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
- return false
- }
if _, err := net.LookupMX(host); err != nil {
if _, err := net.LookupIP(host); err != nil {
return false
@@ -100,7 +100,7 @@ func IsExistingEmail(email string) bool {
return true
}
-// IsURL checks if the string is an URL.
+// IsURL check if the string is an URL.
func IsURL(str string) bool {
if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
return false
@@ -124,7 +124,7 @@ func IsURL(str string) bool {
return rxURL.MatchString(str)
}
-// IsRequestURL checks if the string rawurl, assuming
+// IsRequestURL check if the string rawurl, assuming
// it was received in an HTTP request, is a valid
// URL confirm to RFC 3986
func IsRequestURL(rawurl string) bool {
@@ -138,7 +138,7 @@ func IsRequestURL(rawurl string) bool {
return true
}
-// IsRequestURI checks if the string rawurl, assuming
+// IsRequestURI check if the string rawurl, assuming
// it was received in an HTTP request, is an
// absolute URI or an absolute path.
func IsRequestURI(rawurl string) bool {
@@ -146,7 +146,7 @@ func IsRequestURI(rawurl string) bool {
return err == nil
}
-// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid.
func IsAlpha(str string) bool {
if IsNull(str) {
return true
@@ -154,7 +154,7 @@ func IsAlpha(str string) bool {
return rxAlpha.MatchString(str)
}
-//IsUTFLetter checks if the string contains only unicode letter characters.
+//IsUTFLetter check if the string contains only unicode letter characters.
//Similar to IsAlpha but for all languages. Empty string is valid.
func IsUTFLetter(str string) bool {
if IsNull(str) {
@@ -170,7 +170,7 @@ func IsUTFLetter(str string) bool {
}
-// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid.
func IsAlphanumeric(str string) bool {
if IsNull(str) {
return true
@@ -178,7 +178,7 @@ func IsAlphanumeric(str string) bool {
return rxAlphanumeric.MatchString(str)
}
-// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid.
func IsUTFLetterNumeric(str string) bool {
if IsNull(str) {
return true
@@ -192,7 +192,7 @@ func IsUTFLetterNumeric(str string) bool {
}
-// IsNumeric checks if the string contains only numbers. Empty string is valid.
+// IsNumeric check if the string contains only numbers. Empty string is valid.
func IsNumeric(str string) bool {
if IsNull(str) {
return true
@@ -200,7 +200,7 @@ func IsNumeric(str string) bool {
return rxNumeric.MatchString(str)
}
-// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// IsUTFNumeric check if the string contains only unicode numbers of any kind.
// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid.
func IsUTFNumeric(str string) bool {
if IsNull(str) {
@@ -222,7 +222,7 @@ func IsUTFNumeric(str string) bool {
}
-// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid.
func IsUTFDigit(str string) bool {
if IsNull(str) {
return true
@@ -243,22 +243,22 @@ func IsUTFDigit(str string) bool {
}
-// IsHexadecimal checks if the string is a hexadecimal number.
+// IsHexadecimal check if the string is a hexadecimal number.
func IsHexadecimal(str string) bool {
return rxHexadecimal.MatchString(str)
}
-// IsHexcolor checks if the string is a hexadecimal color.
+// IsHexcolor check if the string is a hexadecimal color.
func IsHexcolor(str string) bool {
return rxHexcolor.MatchString(str)
}
-// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
func IsRGBcolor(str string) bool {
return rxRGBcolor.MatchString(str)
}
-// IsLowerCase checks if the string is lowercase. Empty string is valid.
+// IsLowerCase check if the string is lowercase. Empty string is valid.
func IsLowerCase(str string) bool {
if IsNull(str) {
return true
@@ -266,7 +266,7 @@ func IsLowerCase(str string) bool {
return str == strings.ToLower(str)
}
-// IsUpperCase checks if the string is uppercase. Empty string is valid.
+// IsUpperCase check if the string is uppercase. Empty string is valid.
func IsUpperCase(str string) bool {
if IsNull(str) {
return true
@@ -274,7 +274,7 @@ func IsUpperCase(str string) bool {
return str == strings.ToUpper(str)
}
-// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
+// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid.
func HasLowerCase(str string) bool {
if IsNull(str) {
return true
@@ -282,7 +282,7 @@ func HasLowerCase(str string) bool {
return rxHasLowerCase.MatchString(str)
}
-// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid.
+// HasUpperCase check if the string contains as least 1 uppercase. Empty string is valid.
func HasUpperCase(str string) bool {
if IsNull(str) {
return true
@@ -290,7 +290,7 @@ func HasUpperCase(str string) bool {
return rxHasUpperCase.MatchString(str)
}
-// IsInt checks if the string is an integer. Empty string is valid.
+// IsInt check if the string is an integer. Empty string is valid.
func IsInt(str string) bool {
if IsNull(str) {
return true
@@ -298,12 +298,12 @@ func IsInt(str string) bool {
return rxInt.MatchString(str)
}
-// IsFloat checks if the string is a float.
+// IsFloat check if the string is a float.
func IsFloat(str string) bool {
return str != "" && rxFloat.MatchString(str)
}
-// IsDivisibleBy checks if the string is a number that's divisible by another.
+// IsDivisibleBy check if the string is a number that's divisible by another.
// If second argument is not valid integer or zero, it's return false.
// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero).
func IsDivisibleBy(str, num string) bool {
@@ -316,12 +316,12 @@ func IsDivisibleBy(str, num string) bool {
return (p == 0) || (p%q == 0)
}
-// IsNull checks if the string is null.
+// IsNull check if the string is null.
func IsNull(str string) bool {
return len(str) == 0
}
-// IsNotNull checks if the string is not null.
+// IsNotNull check if the string is not null.
func IsNotNull(str string) bool {
return !IsNull(str)
}
@@ -336,121 +336,34 @@ func HasWhitespace(str string) bool {
return len(str) > 0 && rxHasWhitespace.MatchString(str)
}
-// IsByteLength checks if the string's length (in bytes) falls in a range.
+// IsByteLength check if the string's length (in bytes) falls in a range.
func IsByteLength(str string, min, max int) bool {
return len(str) >= min && len(str) <= max
}
-// IsUUIDv3 checks if the string is a UUID version 3.
+// IsUUIDv3 check if the string is a UUID version 3.
func IsUUIDv3(str string) bool {
return rxUUID3.MatchString(str)
}
-// IsUUIDv4 checks if the string is a UUID version 4.
+// IsUUIDv4 check if the string is a UUID version 4.
func IsUUIDv4(str string) bool {
return rxUUID4.MatchString(str)
}
-// IsUUIDv5 checks if the string is a UUID version 5.
+// IsUUIDv5 check if the string is a UUID version 5.
func IsUUIDv5(str string) bool {
return rxUUID5.MatchString(str)
}
-// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+// IsUUID check if the string is a UUID (version 3, 4 or 5).
func IsUUID(str string) bool {
return rxUUID.MatchString(str)
}
-// Byte to index table for O(1) lookups when unmarshaling.
-// We use 0xFF as sentinel value for invalid indexes.
-var ulidDec = [...]byte{
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
- 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
- 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
- 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
- 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
- 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
- 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-}
-
-// EncodedSize is the length of a text encoded ULID.
-const ulidEncodedSize = 26
-
-// IsULID checks if the string is a ULID.
-//
-// Implementation got from:
-// https://github.com/oklog/ulid (Apache-2.0 License)
-//
-func IsULID(str string) bool {
- // Check if a base32 encoded ULID is the right length.
- if len(str) != ulidEncodedSize {
- return false
- }
-
- // Check if all the characters in a base32 encoded ULID are part of the
- // expected base32 character set.
- if ulidDec[str[0]] == 0xFF ||
- ulidDec[str[1]] == 0xFF ||
- ulidDec[str[2]] == 0xFF ||
- ulidDec[str[3]] == 0xFF ||
- ulidDec[str[4]] == 0xFF ||
- ulidDec[str[5]] == 0xFF ||
- ulidDec[str[6]] == 0xFF ||
- ulidDec[str[7]] == 0xFF ||
- ulidDec[str[8]] == 0xFF ||
- ulidDec[str[9]] == 0xFF ||
- ulidDec[str[10]] == 0xFF ||
- ulidDec[str[11]] == 0xFF ||
- ulidDec[str[12]] == 0xFF ||
- ulidDec[str[13]] == 0xFF ||
- ulidDec[str[14]] == 0xFF ||
- ulidDec[str[15]] == 0xFF ||
- ulidDec[str[16]] == 0xFF ||
- ulidDec[str[17]] == 0xFF ||
- ulidDec[str[18]] == 0xFF ||
- ulidDec[str[19]] == 0xFF ||
- ulidDec[str[20]] == 0xFF ||
- ulidDec[str[21]] == 0xFF ||
- ulidDec[str[22]] == 0xFF ||
- ulidDec[str[23]] == 0xFF ||
- ulidDec[str[24]] == 0xFF ||
- ulidDec[str[25]] == 0xFF {
- return false
- }
-
- // Check if the first character in a base32 encoded ULID will overflow. This
- // happens because the base32 representation encodes 130 bits, while the
- // ULID is only 128 bits.
- //
- // See https://github.com/oklog/ulid/issues/9 for details.
- if str[0] > '7' {
- return false
- }
- return true
-}
-
-// IsCreditCard checks if the string is a credit card.
+// IsCreditCard check if the string is a credit card.
func IsCreditCard(str string) bool {
- sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+ sanitized := notNumberRegexp.ReplaceAllString(str, "")
if !rxCreditCard.MatchString(sanitized) {
return false
}
@@ -464,7 +377,7 @@ func IsCreditCard(str string) bool {
if shouldDouble {
tmpNum *= 2
if tmpNum >= 10 {
- sum += (tmpNum % 10) + 1
+ sum += ((tmpNum % 10) + 1)
} else {
sum += tmpNum
}
@@ -477,18 +390,18 @@ func IsCreditCard(str string) bool {
return sum%10 == 0
}
-// IsISBN10 checks if the string is an ISBN version 10.
+// IsISBN10 check if the string is an ISBN version 10.
func IsISBN10(str string) bool {
return IsISBN(str, 10)
}
-// IsISBN13 checks if the string is an ISBN version 13.
+// IsISBN13 check if the string is an ISBN version 13.
func IsISBN13(str string) bool {
return IsISBN(str, 13)
}
-// IsISBN checks if the string is an ISBN (version 10 or 13).
-// If version value is not equal to 10 or 13, it will be checks both variants.
+// IsISBN check if the string is an ISBN (version 10 or 13).
+// If version value is not equal to 10 or 13, it will be check both variants.
func IsISBN(str string, version int) bool {
sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
var checksum int32
@@ -522,13 +435,13 @@ func IsISBN(str string, version int) bool {
return IsISBN(str, 10) || IsISBN(str, 13)
}
-// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+// IsJSON check if the string is valid JSON (note: uses json.Unmarshal).
func IsJSON(str string) bool {
var js json.RawMessage
return json.Unmarshal([]byte(str), &js) == nil
}
-// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid.
func IsMultibyte(str string) bool {
if IsNull(str) {
return true
@@ -536,7 +449,7 @@ func IsMultibyte(str string) bool {
return rxMultibyte.MatchString(str)
}
-// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+// IsASCII check if the string contains ASCII chars only. Empty string is valid.
func IsASCII(str string) bool {
if IsNull(str) {
return true
@@ -544,7 +457,7 @@ func IsASCII(str string) bool {
return rxASCII.MatchString(str)
}
-// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid.
func IsPrintableASCII(str string) bool {
if IsNull(str) {
return true
@@ -552,7 +465,7 @@ func IsPrintableASCII(str string) bool {
return rxPrintableASCII.MatchString(str)
}
-// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+// IsFullWidth check if the string contains any full-width chars. Empty string is valid.
func IsFullWidth(str string) bool {
if IsNull(str) {
return true
@@ -560,7 +473,7 @@ func IsFullWidth(str string) bool {
return rxFullWidth.MatchString(str)
}
-// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+// IsHalfWidth check if the string contains any half-width chars. Empty string is valid.
func IsHalfWidth(str string) bool {
if IsNull(str) {
return true
@@ -568,7 +481,7 @@ func IsHalfWidth(str string) bool {
return rxHalfWidth.MatchString(str)
}
-// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid.
func IsVariableWidth(str string) bool {
if IsNull(str) {
return true
@@ -576,12 +489,12 @@ func IsVariableWidth(str string) bool {
return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
}
-// IsBase64 checks if a string is base64 encoded.
+// IsBase64 check if a string is base64 encoded.
func IsBase64(str string) bool {
return rxBase64.MatchString(str)
}
-// IsFilePath checks is a string is Win or Unix file path and returns it's type.
+// IsFilePath check is a string is Win or Unix file path and returns it's type.
func IsFilePath(str string) (bool, int) {
if rxWinPath.MatchString(str) {
//check windows path limit see:
@@ -596,27 +509,6 @@ func IsFilePath(str string) (bool, int) {
return false, Unknown
}
-//IsWinFilePath checks both relative & absolute paths in Windows
-func IsWinFilePath(str string) bool {
- if rxARWinPath.MatchString(str) {
- //check windows path limit see:
- // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
- if len(str[3:]) > 32767 {
- return false
- }
- return true
- }
- return false
-}
-
-//IsUnixFilePath checks both relative & absolute paths in Unix
-func IsUnixFilePath(str string) bool {
- if rxARUnixPath.MatchString(str) {
- return true
- }
- return false
-}
-
// IsDataURI checks if a string is base64 encoded data URI such as an image
func IsDataURI(str string) bool {
dataURI := strings.Split(str, ",")
@@ -694,13 +586,11 @@ func IsHash(str string, algorithm string) bool {
len = "40"
} else if algo == "tiger192" {
len = "48"
- } else if algo == "sha3-224" {
- len = "56"
- } else if algo == "sha256" || algo == "sha3-256" {
+ } else if algo == "sha256" {
len = "64"
- } else if algo == "sha384" || algo == "sha3-384" {
+ } else if algo == "sha384" {
len = "96"
- } else if algo == "sha512" || algo == "sha3-512" {
+ } else if algo == "sha512" {
len = "128"
} else {
return false
@@ -709,26 +599,6 @@ func IsHash(str string, algorithm string) bool {
return Matches(str, "^[a-f0-9]{"+len+"}$")
}
-// IsSHA3224 checks is a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
-func IsSHA3224(str string) bool {
- return IsHash(str, "sha3-224")
-}
-
-// IsSHA3256 checks is a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
-func IsSHA3256(str string) bool {
- return IsHash(str, "sha3-256")
-}
-
-// IsSHA3384 checks is a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
-func IsSHA3384(str string) bool {
- return IsHash(str, "sha3-384")
-}
-
-// IsSHA3512 checks is a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
-func IsSHA3512(str string) bool {
- return IsHash(str, "sha3-512")
-}
-
// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
func IsSHA512(str string) bool {
return IsHash(str, "sha512")
@@ -816,25 +686,25 @@ func IsPort(str string) bool {
return false
}
-// IsIPv4 checks if the string is an IP version 4.
+// IsIPv4 check if the string is an IP version 4.
func IsIPv4(str string) bool {
ip := net.ParseIP(str)
return ip != nil && strings.Contains(str, ".")
}
-// IsIPv6 checks if the string is an IP version 6.
+// IsIPv6 check if the string is an IP version 6.
func IsIPv6(str string) bool {
ip := net.ParseIP(str)
return ip != nil && strings.Contains(str, ":")
}
-// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6)
+// IsCIDR check if the string is an valid CIDR notiation (IPV4 & IPV6)
func IsCIDR(str string) bool {
_, _, err := net.ParseCIDR(str)
return err == nil
}
-// IsMAC checks if a string is valid MAC address.
+// IsMAC check if a string is valid MAC address.
// Possible MAC formats:
// 01:23:45:67:89:ab
// 01:23:45:67:89:ab:cd:ef
@@ -852,70 +722,27 @@ func IsHost(str string) bool {
return IsIP(str) || IsDNSName(str)
}
-// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId.
func IsMongoID(str string) bool {
return rxHexadecimal.MatchString(str) && (len(str) == 24)
}
-// IsLatitude checks if a string is valid latitude.
+// IsLatitude check if a string is valid latitude.
func IsLatitude(str string) bool {
return rxLatitude.MatchString(str)
}
-// IsLongitude checks if a string is valid longitude.
+// IsLongitude check if a string is valid longitude.
func IsLongitude(str string) bool {
return rxLongitude.MatchString(str)
}
-// IsIMEI checks if a string is valid IMEI
+// IsIMEI check if a string is valid IMEI
func IsIMEI(str string) bool {
return rxIMEI.MatchString(str)
}
-// IsIMSI checks if a string is valid IMSI
-func IsIMSI(str string) bool {
- if !rxIMSI.MatchString(str) {
- return false
- }
-
- mcc, err := strconv.ParseInt(str[0:3], 10, 32)
- if err != nil {
- return false
- }
-
- switch mcc {
- case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
- case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
- case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
- case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
- case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
- case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
- case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
- case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
- case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
- case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
- case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
- case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
- case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
- case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
- case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
- case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
- case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
- case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
- case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
- case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
- case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
- case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
- case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
- case 738, 740, 742, 744, 746, 748, 750, 995:
- return true
- default:
- return false
- }
- return true
-}
-
-// IsRsaPublicKey checks if a string is valid public key with provided length
+// IsRsaPublicKey check if a string is valid public key with provided length
func IsRsaPublicKey(str string, keylen int) bool {
bb := bytes.NewBufferString(str)
pemBytes, err := ioutil.ReadAll(bb)
@@ -949,14 +776,6 @@ func IsRsaPublicKey(str string, keylen int) bool {
return bitlen == int(keylen)
}
-// IsRegex checks if a give string is a valid regex with RE2 syntax or not
-func IsRegex(str string) bool {
- if _, err := regexp.Compile(str); err == nil {
- return true
- }
- return false
-}
-
func toJSONName(tag string) string {
if tag == "" {
return ""
@@ -977,7 +796,7 @@ func toJSONName(tag string) string {
return name
}
-func prependPathToErrors(err error, path string) error {
+func PrependPathToErrors(err error, path string) error {
switch err2 := err.(type) {
case Error:
err2.Path = append([]string{path}, err2.Path...)
@@ -985,18 +804,13 @@ func prependPathToErrors(err error, path string) error {
case Errors:
errors := err2.Errors()
for i, err3 := range errors {
- errors[i] = prependPathToErrors(err3, path)
+ errors[i] = PrependPathToErrors(err3, path)
}
return err2
}
return err
}
-// ValidateArray performs validation according to condition iterator that validates every element of the array
-func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
- return Every(array, iterator)
-}
-
// ValidateMap use validation map for fields.
// result will be equal to `false` if there are any errors.
// s is the map containing the data to be validated.
@@ -1018,7 +832,7 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro
presentResult = false
var err error
err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key)
- err = prependPathToErrors(err, key)
+ err = PrependPathToErrors(err, key)
errs = append(errs, err)
}
valueField := reflect.ValueOf(value)
@@ -1032,13 +846,13 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro
if v, ok := value.(map[string]interface{}); !ok {
mapResult = false
err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
- err = prependPathToErrors(err, key)
+ err = PrependPathToErrors(err, key)
errs = append(errs, err)
} else {
mapResult, err = ValidateMap(v, subValidator)
if err != nil {
mapResult = false
- err = prependPathToErrors(err, key)
+ err = PrependPathToErrors(err, key)
errs = append(errs, err)
}
}
@@ -1049,7 +863,7 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro
var err error
structResult, err = ValidateStruct(valueField.Interface())
if err != nil {
- err = prependPathToErrors(err, key)
+ err = PrependPathToErrors(err, key)
errs = append(errs, err)
}
}
@@ -1070,13 +884,13 @@ func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, erro
default:
typeResult = false
err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
- err = prependPathToErrors(err, key)
+ err = PrependPathToErrors(err, key)
errs = append(errs, err)
}
result = result && presentResult && typeResult && resultField && structResult && mapResult
index++
}
- // checks required keys
+ // check required keys
requiredResult := true
for key, value := range m {
if schema, ok := value.(string); ok {
@@ -1135,7 +949,7 @@ func ValidateStruct(s interface{}) (bool, error) {
var err error
structResult, err = ValidateStruct(valueField.Interface())
if err != nil {
- err = prependPathToErrors(err, typeField.Name)
+ err = PrependPathToErrors(err, typeField.Name)
errs = append(errs, err)
}
}
@@ -1172,42 +986,6 @@ func ValidateStruct(s interface{}) (bool, error) {
return result, err
}
-// ValidateStructAsync performs async validation of the struct and returns results through the channels
-func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
- res := make(chan bool)
- errors := make(chan error)
-
- go func() {
- defer close(res)
- defer close(errors)
-
- isValid, isFailed := ValidateStruct(s)
-
- res <- isValid
- errors <- isFailed
- }()
-
- return res, errors
-}
-
-// ValidateMapAsync performs async validation of the map and returns results through the channels
-func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
- res := make(chan bool)
- errors := make(chan error)
-
- go func() {
- defer close(res)
- defer close(errors)
-
- isValid, isFailed := ValidateMap(s, m)
-
- res <- isValid
- errors <- isFailed
- }()
-
- return res, errors
-}
-
// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
func parseTagIntoMap(tag string) tagOptionsMap {
optionsMap := make(tagOptionsMap)
@@ -1256,12 +1034,12 @@ func IsSSN(str string) bool {
return rxSSN.MatchString(str)
}
-// IsSemver checks if string is valid semantic version
+// IsSemver check if string is valid semantic version
func IsSemver(str string) bool {
return rxSemver.MatchString(str)
}
-// IsType checks if interface is of some type
+// IsType check if interface is of some type
func IsType(v interface{}, params ...string) bool {
if len(params) == 1 {
typ := params[0]
@@ -1270,13 +1048,13 @@ func IsType(v interface{}, params ...string) bool {
return false
}
-// IsTime checks if string is valid according to given format
+// IsTime check if string is valid according to given format
func IsTime(str string, format string) bool {
_, err := time.Parse(format, str)
return err == nil
}
-// IsUnixTime checks if string is valid unix timestamp value
+// IsUnixTime check if string is valid unix timestamp value
func IsUnixTime(str string) bool {
if _, err := strconv.Atoi(str); err == nil {
return true
@@ -1284,17 +1062,17 @@ func IsUnixTime(str string) bool {
return false
}
-// IsRFC3339 checks if string is valid timestamp value according to RFC3339
+// IsRFC3339 check if string is valid timestamp value according to RFC3339
func IsRFC3339(str string) bool {
return IsTime(str, time.RFC3339)
}
-// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone.
+// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone.
func IsRFC3339WithoutZone(str string) bool {
- return IsTime(str, rfc3339WithoutZone)
+ return IsTime(str, RF3339WithoutZone)
}
-// IsISO4217 checks if string is valid ISO currency code
+// IsISO4217 check if string is valid ISO currency code
func IsISO4217(str string) bool {
for _, currency := range ISO4217List {
if str == currency {
@@ -1305,7 +1083,7 @@ func IsISO4217(str string) bool {
return false
}
-// ByteLength checks string's length
+// ByteLength check string's length
func ByteLength(str string, params ...string) bool {
if len(params) == 2 {
min, _ := ToInt(params[0])
@@ -1316,13 +1094,13 @@ func ByteLength(str string, params ...string) bool {
return false
}
-// RuneLength checks string's length
+// RuneLength check string's length
// Alias for StringLength
func RuneLength(str string, params ...string) bool {
return StringLength(str, params...)
}
-// IsRsaPub checks whether string is valid RSA key
+// IsRsaPub check whether string is valid RSA key
// Alias for IsRsaPublicKey
func IsRsaPub(str string, params ...string) bool {
if len(params) == 1 {
@@ -1342,7 +1120,7 @@ func StringMatches(s string, params ...string) bool {
return false
}
-// StringLength checks string's length (including multi byte strings)
+// StringLength check string's length (including multi byte strings)
func StringLength(str string, params ...string) bool {
if len(params) == 2 {
@@ -1355,7 +1133,7 @@ func StringLength(str string, params ...string) bool {
return false
}
-// MinStringLength checks string's minimum length (including multi byte strings)
+// MinStringLength check string's minimum length (including multi byte strings)
func MinStringLength(str string, params ...string) bool {
if len(params) == 1 {
@@ -1367,7 +1145,7 @@ func MinStringLength(str string, params ...string) bool {
return false
}
-// MaxStringLength checks string's maximum length (including multi byte strings)
+// MaxStringLength check string's maximum length (including multi byte strings)
func MaxStringLength(str string, params ...string) bool {
if len(params) == 1 {
@@ -1379,7 +1157,7 @@ func MaxStringLength(str string, params ...string) bool {
return false
}
-// Range checks string's length
+// Range check string's length
func Range(str string, params ...string) bool {
if len(params) == 2 {
value, _ := ToFloat(str)
@@ -1391,7 +1169,6 @@ func Range(str string, params ...string) bool {
return false
}
-// IsInRaw checks if string is in list of allowed values
func IsInRaw(str string, params ...string) bool {
if len(params) == 1 {
rawParams := params[0]
@@ -1404,7 +1181,7 @@ func IsInRaw(str string, params ...string) bool {
return false
}
-// IsIn checks if string str is a member of the set of strings params
+// IsIn check if string str is a member of the set of strings params
func IsIn(str string, params ...string) bool {
for _, param := range params {
if str == param {
@@ -1442,7 +1219,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
tag := t.Tag.Get(tagName)
- // checks if the field should be ignored
+ // Check if the field should be ignored
switch tag {
case "":
if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
@@ -1461,8 +1238,8 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
options = parseTagIntoMap(tag)
}
- if isEmptyValue(v) {
- // an empty value is not validated, checks only required
+ if !isFieldSet(v) {
+ // an empty value is not validated, check only required
isValid, resultErr = checkRequired(v, t, options)
for key := range options {
delete(options, key)
@@ -1515,13 +1292,13 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
validator := validatorSpec
customMsgExists := len(validatorStruct.customErrorMessage) > 0
- // checks whether the tag looks like '!something' or 'something'
+ // Check whether the tag looks like '!something' or 'something'
if validator[0] == '!' {
validator = validator[1:]
negate = true
}
- // checks for interface param validators
+ // Check for interface param validators
for key, value := range InterfaceParamTagRegexMap {
ps := value.FindStringSubmatch(validator)
if len(ps) == 0 {
@@ -1554,20 +1331,20 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64,
reflect.String:
- // for each tag option checks the map of validator functions
+ // for each tag option check the map of validator functions
for _, validatorSpec := range optionsOrder {
validatorStruct := options[validatorSpec]
var negate bool
validator := validatorSpec
customMsgExists := len(validatorStruct.customErrorMessage) > 0
- // checks whether the tag looks like '!something' or 'something'
+ // Check whether the tag looks like '!something' or 'something'
if validator[0] == '!' {
validator = validator[1:]
negate = true
}
- // checks for param validators
+ // Check for param validators
for key, value := range ParamTagRegexMap {
ps := value.FindStringSubmatch(validator)
if len(ps) == 0 {
@@ -1648,7 +1425,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
} else {
resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
if err != nil {
- err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+ err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
return false, err
}
}
@@ -1668,7 +1445,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
} else {
resultItem, err = ValidateStruct(v.Index(i).Interface())
if err != nil {
- err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+ err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
return false, err
}
}
@@ -1682,7 +1459,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
}
return ValidateStruct(v.Interface())
case reflect.Ptr:
- // If the value is a pointer then checks its element
+ // If the value is a pointer then check its element
if v.IsNil() {
return true, nil
}
@@ -1698,26 +1475,14 @@ func stripParams(validatorString string) string {
return paramsRegexp.ReplaceAllString(validatorString, "")
}
-// isEmptyValue checks whether value empty or not
-func isEmptyValue(v reflect.Value) bool {
+// isFieldSet returns false for nil pointers, interfaces, maps, and slices. For all other values, it returns true.
+func isFieldSet(v reflect.Value) bool {
switch v.Kind() {
- case reflect.String, reflect.Array:
- return v.Len() == 0
- case reflect.Map, reflect.Slice:
- return v.Len() == 0 || v.IsNil()
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
+ case reflect.Map, reflect.Slice, reflect.Interface, reflect.Ptr:
+ return !v.IsNil()
}
- return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+ return true
}
// ErrorByField returns error for specified field of the struct
@@ -1763,7 +1528,3 @@ func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
func (sv stringValues) get(i int) string { return sv[i].String() }
-
-func IsE164(str string) bool {
- return rxE164.MatchString(str)
-}
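Beyond the comment-wording churn, the downgraded validator.go removes IsULID, IsRegex, IsIMSI, IsE164, the SHA-3 aliases, and the async validators, exports PrependPathToErrors and RF3339WithoutZone, and replaces isEmptyValue with the narrower isFieldSet. A small sketch of the surviving surface, with illustrative inputs:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// RF3339WithoutZone is an exported constant in the pinned release.
	fmt.Println(govalidator.RF3339WithoutZone)                            // 2006-01-02T15:04:05
	fmt.Println(govalidator.IsRFC3339WithoutZone("2016-12-31T11:00:00"))  // true
	fmt.Println(govalidator.IsRFC3339WithoutZone("2016-12-31T11:00:00Z")) // false: zone not allowed

	// Removed helpers such as IsULID have no replacement here; callers
	// would need another library (e.g. oklog/ulid), not shown.
	fmt.Println(govalidator.IsSemver("v1.2.3")) // true
}
```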
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 4b19188d9..8a290f197 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,7 @@
package md2man
import (
+ "bufio"
"bytes"
"fmt"
"io"
@@ -21,34 +22,35 @@ type roffRenderer struct {
}
const (
- titleHeader = ".TH "
- topLevelHeader = "\n\n.SH "
- secondLevelHdr = "\n.SH "
- otherHeader = "\n.SS "
- crTag = "\n"
- emphTag = "\\fI"
- emphCloseTag = "\\fP"
- strongTag = "\\fB"
- strongCloseTag = "\\fP"
- breakTag = "\n.br\n"
- paraTag = "\n.PP\n"
- hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
- linkTag = "\n\\[la]"
- linkCloseTag = "\\[ra]"
- codespanTag = "\\fB"
- codespanCloseTag = "\\fR"
- codeTag = "\n.EX\n"
- codeCloseTag = "\n.EE\n"
- quoteTag = "\n.PP\n.RS\n"
- quoteCloseTag = "\n.RE\n"
- listTag = "\n.RS\n"
- listCloseTag = "\n.RE\n"
- dtTag = "\n.TP\n"
- dd2Tag = "\n"
- tableStart = "\n.TS\nallbox;\n"
- tableEnd = ".TE\n"
- tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
+ titleHeader = ".TH "
+ topLevelHeader = "\n\n.SH "
+ secondLevelHdr = "\n.SH "
+ otherHeader = "\n.SS "
+ crTag = "\n"
+ emphTag = "\\fI"
+ emphCloseTag = "\\fP"
+ strongTag = "\\fB"
+ strongCloseTag = "\\fP"
+ breakTag = "\n.br\n"
+ paraTag = "\n.PP\n"
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
+ linkTag = "\n\\[la]"
+ linkCloseTag = "\\[ra]"
+ codespanTag = "\\fB"
+ codespanCloseTag = "\\fR"
+ codeTag = "\n.EX\n"
+ codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on).
+ quoteTag = "\n.PP\n.RS\n"
+ quoteCloseTag = "\n.RE\n"
+ listTag = "\n.RS\n"
+ listCloseTag = "\n.RE\n"
+ dtTag = "\n.TP\n"
+ dd2Tag = "\n"
+ tableStart = "\n.TS\nallbox;\n"
+ tableEnd = ".TE\n"
+ tableCellStart = "T{\n"
+ tableCellEnd = "\nT}\n"
+ tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
@@ -75,6 +77,16 @@ func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+ // We need to walk the tree to check if there are any tables.
+ // If there are, we need to enable the roff table preprocessor.
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ if node.Type == blackfriday.Table {
+ out(w, tablePreprocessor+"\n")
+ return blackfriday.Terminate
+ }
+ return blackfriday.GoToNext
+ })
+
// disable hyphenation
out(w, ".nh\n")
}
@@ -322,6 +334,28 @@ func out(w io.Writer, output string) {
}
func escapeSpecialChars(w io.Writer, text []byte) {
+ scanner := bufio.NewScanner(bytes.NewReader(text))
+
+ // count the number of lines in the text
+ // we need to know this to avoid adding a newline after the last line
+ n := bytes.Count(text, []byte{'\n'})
+ idx := 0
+
+ for scanner.Scan() {
+ dt := scanner.Bytes()
+ if idx < n {
+ idx++
+ dt = append(dt, '\n')
+ }
+ escapeSpecialCharsLine(w, dt)
+ }
+
+ if err := scanner.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func escapeSpecialCharsLine(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
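The go-md2man change above emits the roff table-preprocessor hint ('\" t) whenever the markdown AST contains a table, and escapes special characters line by line instead of over the whole buffer. A minimal usage sketch; the input markdown is made up:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	src := []byte("# demo 1\n\nSome text.\n\n| a | b |\n|---|---|\n| 1 | 2 |\n")

	// After this change the rendered page should start with the '\" t
	// preprocessor line because the document contains a table.
	out := md2man.Render(src)
	fmt.Println(string(out))
}
```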
diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
index a1819b16a..2e6eca448 100644
--- a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
+++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go
@@ -167,6 +167,19 @@ func Marshal(v any) ([]byte, error) {
return buf, nil
}
+func MarshalEscaped(v any, escape bool) ([]byte, error) {
+ e := newEncodeState()
+ defer encodeStatePool.Put(e)
+
+ err := e.marshal(v, encOpts{escapeHTML: escape})
+ if err != nil {
+ return nil, err
+ }
+ buf := append([]byte(nil), e.Bytes()...)
+
+ return buf, nil
+}
+
// MarshalIndent is like Marshal but applies Indent to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go
index 1442ef29e..5598ce11f 100644
--- a/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go
+++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go
@@ -6,7 +6,7 @@ package json
import (
"bytes"
- "errors"
+ "encoding/json"
"io"
)
@@ -259,27 +259,7 @@ func (enc *Encoder) SetEscapeHTML(on bool) {
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
-type RawMessage []byte
-
-// MarshalJSON returns m as the JSON encoding of m.
-func (m RawMessage) MarshalJSON() ([]byte, error) {
- if m == nil {
- return []byte("null"), nil
- }
- return m, nil
-}
-
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
- if m == nil {
- return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
- }
- *m = append((*m)[0:0], data...)
- return nil
-}
-
-var _ Marshaler = (*RawMessage)(nil)
-var _ Unmarshaler = (*RawMessage)(nil)
+type RawMessage = json.RawMessage
// A Token holds a value of one of these types:
//
diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go
index bbe9f85f2..f79caf313 100644
--- a/vendor/github.com/evanphx/json-patch/v5/merge.go
+++ b/vendor/github.com/evanphx/json-patch/v5/merge.go
@@ -10,26 +10,26 @@ import (
"github.com/evanphx/json-patch/v5/internal/json"
)
-func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
- curDoc, err := cur.intoDoc()
+func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode {
+ curDoc, err := cur.intoDoc(options)
if err != nil {
- pruneNulls(patch)
+ pruneNulls(patch, options)
return patch
}
- patchDoc, err := patch.intoDoc()
+ patchDoc, err := patch.intoDoc(options)
if err != nil {
return patch
}
- mergeDocs(curDoc, patchDoc, mergeMerge)
+ mergeDocs(curDoc, patchDoc, mergeMerge, options)
return cur
}
-func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) {
for k, v := range patch.obj {
if v == nil {
if mergeMerge {
@@ -45,55 +45,55 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
}
doc.obj[k] = nil
} else {
- _ = doc.remove(k, &ApplyOptions{})
+ _ = doc.remove(k, options)
}
} else {
cur, ok := doc.obj[k]
if !ok || cur == nil {
if !mergeMerge {
- pruneNulls(v)
+ pruneNulls(v, options)
}
- _ = doc.set(k, v, &ApplyOptions{})
+ _ = doc.set(k, v, options)
} else {
- _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
+ _ = doc.set(k, merge(cur, v, mergeMerge, options), options)
}
}
}
}
-func pruneNulls(n *lazyNode) {
- sub, err := n.intoDoc()
+func pruneNulls(n *lazyNode, options *ApplyOptions) {
+ sub, err := n.intoDoc(options)
if err == nil {
- pruneDocNulls(sub)
+ pruneDocNulls(sub, options)
} else {
ary, err := n.intoAry()
if err == nil {
- pruneAryNulls(ary)
+ pruneAryNulls(ary, options)
}
}
}
-func pruneDocNulls(doc *partialDoc) *partialDoc {
+func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc {
for k, v := range doc.obj {
if v == nil {
_ = doc.remove(k, &ApplyOptions{})
} else {
- pruneNulls(v)
+ pruneNulls(v, options)
}
}
return doc
}
-func pruneAryNulls(ary *partialArray) *partialArray {
+func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray {
newAry := []*lazyNode{}
for _, v := range ary.nodes {
if v != nil {
- pruneNulls(v)
+ pruneNulls(v, options)
}
newAry = append(newAry, v)
}
@@ -128,11 +128,17 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
return nil, errBadJSONPatch
}
- doc := &partialDoc{}
+ options := NewApplyOptions()
+
+ doc := &partialDoc{
+ opts: options,
+ }
docErr := doc.UnmarshalJSON(docData)
- patch := &partialDoc{}
+ patch := &partialDoc{
+ opts: options,
+ }
patchErr := patch.UnmarshalJSON(patchData)
@@ -158,7 +164,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
if mergeMerge {
doc = patch
} else {
- doc = pruneDocNulls(patch)
+ doc = pruneDocNulls(patch, options)
}
} else {
patchAry := &partialArray{}
@@ -172,7 +178,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
return nil, errBadJSONPatch
}
- pruneAryNulls(patchAry)
+ pruneAryNulls(patchAry, options)
out, patchErr := json.Marshal(patchAry.nodes)
@@ -183,7 +189,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
return out, nil
}
} else {
- mergeDocs(doc, patch, mergeMerge)
+ mergeDocs(doc, patch, mergeMerge, options)
}
return json.Marshal(doc)
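The options threading above serves RFC 7386 merge-patch semantics, where a null value deletes a key; a small sketch of the public MergePatch entry point, assuming the json-patch/v5 import path used elsewhere in this module:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	original := []byte(`{"name":"Jane","email":"jane@example.com","tags":["a","b"]}`)
	// RFC 7386 merge patch: null removes a key, other values replace it.
	patch := []byte(`{"email":null,"tags":["c"]}`)

	merged, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // e.g. {"name":"Jane","tags":["c"]}
}
```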
diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go
index a3bbf1c73..7a7f71c8b 100644
--- a/vendor/github.com/evanphx/json-patch/v5/patch.go
+++ b/vendor/github.com/evanphx/json-patch/v5/patch.go
@@ -38,6 +38,8 @@ var (
ErrInvalid = errors.New("invalid state detected")
ErrInvalidIndex = errors.New("invalid index referenced")
+ ErrExpectedObject = errors.New("invalid value, expected object")
+
rawJSONArray = []byte("[]")
rawJSONObject = []byte("{}")
rawJSONNull = []byte("null")
@@ -60,6 +62,8 @@ type partialDoc struct {
self *lazyNode
keys []string
obj map[string]*lazyNode
+
+ opts *ApplyOptions
}
type partialArray struct {
@@ -90,6 +94,8 @@ type ApplyOptions struct {
// EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
// Default to false.
EnsurePathExistsOnAdd bool
+
+ EscapeHTML bool
}
// NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
@@ -99,6 +105,7 @@ func NewApplyOptions() *ApplyOptions {
AccumulatedCopySizeLimit: AccumulatedCopySizeLimit,
AllowMissingPathOnRemove: false,
EnsurePathExistsOnAdd: false,
+ EscapeHTML: true,
}
}
@@ -134,16 +141,28 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
}
func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
+ if n.obj == nil {
+ return ErrExpectedObject
+ }
+
if err := buf.WriteByte('{'); err != nil {
return err
}
+ escaped := true
+
+ // n.opts should always be set, but in case we missed a case,
+ // guard.
+ if n.opts != nil {
+ escaped = n.opts.EscapeHTML
+ }
+
for i, k := range n.keys {
if i > 0 {
if err := buf.WriteByte(','); err != nil {
return err
}
}
- key, err := json.Marshal(k)
+ key, err := json.MarshalEscaped(k, escaped)
if err != nil {
return err
}
@@ -153,7 +172,7 @@ func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
if err := buf.WriteByte(':'); err != nil {
return err
}
- value, err := json.Marshal(n.obj[k])
+ value, err := json.MarshalEscaped(n.obj[k], escaped)
if err != nil {
return err
}
@@ -194,11 +213,11 @@ func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
return n.nodes, nil
}
-func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
}
- a, err := json.Marshal(src)
+ a, err := json.MarshalEscaped(src, options.EscapeHTML)
if err != nil {
return nil, 0, err
}
@@ -216,7 +235,7 @@ func (n *lazyNode) nextByte() byte {
return s[0]
}
-func (n *lazyNode) intoDoc() (*partialDoc, error) {
+func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) {
if n.which == eDoc {
return n.doc, nil
}
@@ -235,6 +254,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
return nil, ErrInvalid
}
+ n.doc.opts = options
if err != nil {
return nil, err
}
@@ -545,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
return nil, ""
}
} else {
- doc, err = next.intoDoc()
+ doc, err = next.intoDoc(options)
if err != nil {
return nil, ""
@@ -557,6 +577,10 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
}
func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
+ if d.obj == nil {
+ return ErrExpectedObject
+ }
+
found := false
for _, k := range d.keys {
if k == key {
@@ -579,6 +603,11 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
if key == "" {
return d.self, nil
}
+
+ if d.obj == nil {
+ return nil, ErrExpectedObject
+ }
+
v, ok := d.obj[key]
if !ok {
return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@@ -587,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
}
func (d *partialDoc) remove(key string, options *ApplyOptions) error {
+ if d.obj == nil {
+ return ErrExpectedObject
+ }
+
_, ok := d.obj[key]
if !ok {
if options.AllowMissingPathOnRemove {
@@ -750,6 +783,7 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
} else {
pd = &partialDoc{
self: val,
+ opts: options,
}
}
@@ -855,7 +889,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
newNode := newLazyNode(newRawMessage(rawJSONObject))
doc.add(part, newNode, options)
- doc, err = newNode.intoDoc()
+ doc, err = newNode.intoDoc(options)
if err != nil {
return err
}
@@ -868,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
return err
}
} else {
- doc, err = target.intoDoc()
+ doc, err = target.intoDoc(options)
if err != nil {
return err
@@ -954,6 +988,8 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
if !val.tryAry() {
return errors.Wrapf(err, "replace operation value must be object or array")
}
+ } else {
+ val.doc.opts = options
}
}
@@ -1115,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
}
- valCopy, sz, err := deepCopy(val)
+ valCopy, sz, err := deepCopy(val, options)
if err != nil {
return errors.Wrapf(err, "error while performing deep copy")
}
@@ -1202,6 +1238,7 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
} else {
pd = &partialDoc{
self: self,
+ opts: options,
}
}
@@ -1238,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
}
}
- if indent != "" {
- return json.MarshalIndent(pd, "", indent)
+ data, err := json.MarshalEscaped(pd, options.EscapeHTML)
+ if err != nil {
+ return nil, err
+ }
+
+ if indent == "" {
+ return data, nil
}
- return json.Marshal(pd)
+ var buf bytes.Buffer
+ json.Indent(&buf, data, "", indent)
+ return buf.Bytes(), nil
}
// From http://tools.ietf.org/html/rfc6901#section-4 :
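A minimal sketch of the new EscapeHTML knob from the caller's side, assuming the v5 ApplyWithOptions API and the default of true shown in NewApplyOptions:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"link":"old"}`)
	patchJSON := []byte(`[{"op":"replace","path":"/link","value":"<a href=\"x\">go</a>"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EscapeHTML = false // keep <, > and & literal instead of \u003c etc.

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```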
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index 8969526a6..7c7f0c69c 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index fb2f866f4..30568e768 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -236,15 +236,14 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
- outputFormat outputFormat
- prefix string
- values []any
- valuesStr string
- parentValuesStr string
- depth int
- opts *Options
- group string // for slog groups
- groupDepth int
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
}
// outputFormat indicates which outputFormat to use.
@@ -257,6 +256,13 @@ const (
outputJSON
)
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []any
@@ -264,76 +270,102 @@ type PseudoStruct []any
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
if f.outputFormat == outputJSON {
- buf.WriteByte('{') // for the whole line
+ buf.WriteByte('{') // for the whole record
}
+ // Render builtins
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
- f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
- if f.parentValuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
}
- buf.WriteString(f.parentValuesStr)
- continuing = true
- }
+ f.flatten(buf, vals, true) // escape user-provided keys
- groupDepth := f.groupDepth
- if f.group != "" {
- if f.valuesStr != "" || len(args) != 0 {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
- buf.WriteByte(f.colon())
- buf.WriteByte('{') // for the group
- continuing = false
- } else {
- // The group was empty
- groupDepth--
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
}
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
}
- if f.valuesStr != "" {
+ if bodyStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
- buf.WriteString(f.valuesStr)
- continuing = true
+ buf.WriteString(bodyStr)
}
- vals = args
- if hook := f.opts.RenderArgsHook; hook != nil {
- vals = hook(f.sanitize(vals))
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
}
- f.flatten(buf, vals, continuing, true) // escape user-provided keys
- for i := 0; i < groupDepth; i++ {
- buf.WriteByte('}') // for the groups
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
}
- if f.outputFormat == outputJSON {
- buf.WriteByte('}') // for the whole line
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
}
return buf.String()
}
-// flatten renders a list of key-value pairs into a buffer. If continuing is
-// true, it assumes that the buffer has previous values and will emit a
-// separator (which depends on the output format) before the first pair it
-// writes. If escapeKeys is true, the keys are assumed to have
-// non-JSON-compatible characters in them and must be evaluated for escapes.
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@@ -354,7 +386,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
}
v := kvList[i+1]
- if i > 0 || continuing {
+ if i > 0 {
if f.outputFormat == outputJSON {
buf.WriteByte(f.comma())
} else {
@@ -766,46 +798,17 @@ func (f Formatter) sanitize(kvList []any) []any {
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
-func (f *Formatter) startGroup(group string) {
+func (f *Formatter) startGroup(name string) {
// Unnamed groups are just inlined.
- if group == "" {
+ if name == "" {
return
}
- // Any saved values can no longer be changed.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- continuing := false
-
- if f.parentValuesStr != "" {
- buf.WriteString(f.parentValuesStr)
- continuing = true
- }
-
- if f.group != "" && f.valuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
- buf.WriteByte(f.colon())
- buf.WriteByte('{') // for the group
- continuing = false
- }
-
- if f.valuesStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(f.valuesStr)
- }
-
- // NOTE: We don't close the scope here - that's done later, when a log line
- // is actually rendered (because we have N scopes to close).
-
- f.parentValuesStr = buf.String()
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
// Start collecting new values.
- f.group = group
- f.groupDepth++
+ f.groupName = name
f.valuesStr = ""
f.values = nil
}
@@ -900,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) {
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
- f.flatten(buf, vals, false, true) // escape user-provided keys
+ f.flatten(buf, vals, true) // escape user-provided keys
f.valuesStr = buf.String()
}
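A rough sketch of the code path this refactor serves, assuming logr v1.4's slog bridge (logr.ToSlogHandler); slog's WithGroup is what drives startGroup/renderGroup, and groups with no contents are now elided:

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr renders each record to a JSON-ish string and hands it to us.
	base := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

	// Bridge into slog; WithGroup exercises the group stack above.
	log := slog.New(logr.ToSlogHandler(base))
	log.WithGroup("req").Info("handled", "method", "GET", "status", 200)

	// A group with no attributes is elided rather than rendered as "req":{}.
	log.WithGroup("empty").Info("no attrs")
}
```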
diff --git a/vendor/github.com/go-resty/resty/v2/README.md b/vendor/github.com/go-resty/resty/v2/README.md
index ef6ed1d55..eccca8bce 100644
--- a/vendor/github.com/go-resty/resty/v2/README.md
+++ b/vendor/github.com/go-resty/resty/v2/README.md
@@ -4,7 +4,7 @@
Features section describes in detail about Resty capabilities
-
+
Resty Communication Channels
@@ -13,7 +13,7 @@
## News
- * v2.13.0 [released](https://github.com/go-resty/resty/releases/tag/v2.13.0) and tagged on May 08, 2024.
+ * v2.13.1 [released](https://github.com/go-resty/resty/releases/tag/v2.13.1) and tagged on May 10, 2024.
* v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
* v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
* v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
index 985cff250..f8becec6c 100644
--- a/vendor/github.com/go-resty/resty/v2/resty.go
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -14,7 +14,7 @@ import (
)
// Version # of resty
-const Version = "2.12.0"
+const Version = "2.13.1"
// New method creates a new Resty client.
func New() *Client {
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index fb1478c3b..4021b96cc 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -13,6 +13,7 @@
Aaron Hopkins
Achille Roussel
+Aidan
Alex Snast
Alexey Palazhchenko
Andrew Reid
@@ -20,12 +21,14 @@ Animesh Ray
Arne Hormann
Ariel Mashraki
Asta Xie
+Brian Hendriks
Bulat Gaifullin
Caine Jette
Carlos Nieto
Chris Kirkland
Chris Moos
Craig Wilson
+Daemonxiao <735462752 at qq.com>
Daniel Montoya
Daniel Nichter
Daniël van Eeden
@@ -33,9 +36,11 @@ Dave Protasowski
DisposaBoy
Egor Smolyakov
Erwan Martin
+Evan Elias
Evan Shaw
Frederick Mayle
Gustavo Kristic
+Gusted
Hajime Nakagami
Hanno Braun
Henri Yandell
@@ -47,8 +52,11 @@ INADA Naoki
Jacek Szwec
James Harr
Janek Vedock
+Jason Ng
+Jean-Yves Pellé
Jeff Hodges
Jeffrey Charles
+Jennifer Purevsuren
Jerome Meyer
Jiajia Zhong
Jian Zhen
@@ -74,9 +82,11 @@ Maciej Zimnoch
Michael Woolnough
Nathanial Murphy
Nicola Peduzzi
+Oliver Bone
Olivier Mengué
oscarzhao
Paul Bonser
+Paulius Lozys
Peter Schultz
Phil Porada
Rebecca Chin
@@ -95,6 +105,7 @@ Stan Putrya
Stanley Gunawan
Steven Hartland
Tan Jinhua <312841925 at qq.com>
+Tetsuro Aoki
Thomas Wodarek
Tim Ruffles
Tom Jenkinson
@@ -104,6 +115,7 @@ Xiangyu Hu
Xiaobing Jiang
Xiuming Chen
Xuehong Chan
+Zhang Xiang
Zhenye Xie
Zhixin Wen
Ziheng Lyu
@@ -113,14 +125,18 @@ Ziheng Lyu
Barracuda Networks, Inc.
Counting Ltd.
DigitalOcean Inc.
+Dolthub Inc.
dyves labs AG
Facebook Inc.
GitHub Inc.
Google Inc.
InfoSum Ltd.
Keybase Inc.
+Microsoft Corp.
Multiplay Ltd.
Percona LLC
+PingCAP Inc.
Pivotal Inc.
+Shattered Silicon Ltd.
Stripe Inc.
Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 5166e4adb..0c9bd9b10 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,45 @@
+## Version 1.8.1 (2024-03-26)
+
+Bugfixes:
+
+- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
+
+## Version 1.8.0 (2024-03-09)
+
+Major Changes:
+
+- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
+ - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
+ - If you specify neither charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connections. This uses the server's default collation for utf8mb4.
+ - If you specify charset, go-mysql-driver sends `SET NAMES <charset>`. This uses the server's default collation for `<charset>`.
+ - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
+- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
+ - This is backward incompatible in rare case. Check your DSN.
+- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
+ - Use Go 1.18+
+- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
+ - When the text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql to avoid unnecessary allocation and conversion.
+ - If the user passed `*any` to `Scan()`, database/sql stored the raw `[]byte` in the target variable.
+ - This confused users because most users don't know when the text or binary protocol is used.
+ - go-mysql-driver 1.8 converts integer/float values into int64/double even in the text protocol. This doesn't increase allocation compared to `[]byte`, and the conversion cost is negligible.
+- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future versions may introduce Functional Options for existing options, but not for now.
+ - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
+ - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
+
+
+Other changes:
+
+- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
+- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
+- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
+- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
+- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
+- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
+- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
+- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
+- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
+- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
+
## Version 1.7.1 (2023-04-25)
Changes:
@@ -162,7 +204,7 @@ New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
@@ -206,7 +248,7 @@ Changes:
- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
New Features:
@@ -254,7 +296,7 @@ Bugfixes:
- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- - Splitted packets (more than 16MB) are now merged correctly
+ - Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index 3b5d229aa..4968cb060 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -40,15 +40,23 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Optional placeholder interpolation
## Requirements
- * Go 1.13 or higher. We aim to support the 3 latest versions of Go.
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
+* MySQL (5.7+) and MariaDB (10.3+) are supported.
+* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
+ * Do not ask questions about TiDB in our issue tracker or forum.
+ * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
+ * [Forum](https://ask.pingcap.com/)
+* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
+ * Maintainers won't support them. Do not expect issues to be investigated and resolved by the maintainers.
+ * Investigate issues yourself and please send a pull request to fix them.
---------------------------------------
## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get -u github.com/go-sql-driver/mysql
+go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -114,6 +122,12 @@ This has the same effect as an empty DSN string:
```
+`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
+
+```
+/dbname%2Fwithslash
+```
+
Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
#### Password
@@ -121,7 +135,7 @@ Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use an Unix domain socket if available and TCP otherwise for best performance.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.
@@ -145,7 +159,7 @@ Default: false
```
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
##### `allowCleartextPasswords`
@@ -194,10 +208,9 @@ Valid Values:
Default: none
```
-Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
+See also [Unicode Support](#unicode-support).
##### `checkConnLiveness`
@@ -226,6 +239,7 @@ The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You s
Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+See also [Unicode Support](#unicode-support).
##### `clientFoundRows`
@@ -279,6 +293,15 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+##### `timeTruncate`
+
+```
+Type: duration
+Default: 0
+```
+
+[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `maxAllowedPacket`
```
Type: decimal number
@@ -295,9 +318,25 @@ Valid Values: true, false
Default: false
```
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+Allow multiple statements in one query. This can be used to batch multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get the result of the second and subsequent queries.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless a prepared statement is used explicitly.
+
+It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
+```go
+conn, _ := db.Conn(ctx)
+conn.Raw(func(conn any) error {
+ ex := conn.(driver.Execer)
+ res, err := ex.Exec(`
+ UPDATE point SET x = 1 WHERE y = 2;
+ UPDATE point SET x = 2 WHERE y = 3;
+ `, nil)
+ // Both slices have 2 elements.
+ log.Print(res.(mysql.Result).AllRowsAffected())
+ log.Print(res.(mysql.Result).AllLastInsertIds())
+})
+```
##### `parseTime`
@@ -393,6 +432,15 @@ Default: 0
I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+##### `connectionAttributes`
+
+```
+Type: comma-delimited string of user-defined "key:value" pairs
+Valid Values: (<name1>:<value1>,<name2>:<value2>,...)
+Default: none
+```
+
+[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
##### System Variables
@@ -465,7 +513,7 @@ user:password@/
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `BIGINT`.
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
@@ -478,7 +526,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
@@ -496,9 +544,11 @@ However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` v
### Unicode support
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
+- When only the `charset` is specified, the `SET NAMES <charset>` query is sent and the server's default collation is used.
+- When both the `charset` and `collation` are specified, the `SET NAMES <charset> COLLATE <collation>` query is sent.
+- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
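A small usage sketch tying the charset/collation parameters above to a DSN; the host, credentials and database name are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// charset plus an explicit collation sends "SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci";
	// parseTime scans DATE/DATETIME into time.Time.
	dsn := "user:password@tcp(127.0.0.1:3306)/mydb?charset=utf8mb4&collation=utf8mb4_unicode_ci&parseTime=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```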
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
index 1ff203e57..74e1bd03e 100644
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -13,10 +13,13 @@ import (
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
+ "crypto/sha512"
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
+
+ "filippo.io/edwards25519"
)
// server pub keys registry
@@ -33,7 +36,7 @@ var (
// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
// after registering it and may not be modified.
//
-// data, err := ioutil.ReadFile("mykey.pem")
+// data, err := os.ReadFile("mykey.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -225,6 +228,44 @@ func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte,
return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
}
+// authEd25519 does ed25519 authentication used by MariaDB.
+func authEd25519(scramble []byte, password string) ([]byte, error) {
+ // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
+ // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
+ h := sha512.Sum512([]byte(password))
+
+ s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
+ if err != nil {
+ return nil, err
+ }
+ A := (&edwards25519.Point{}).ScalarBaseMult(s)
+
+ mh := sha512.New()
+ mh.Write(h[32:])
+ mh.Write(scramble)
+ messageDigest := mh.Sum(nil)
+ r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ R := (&edwards25519.Point{}).ScalarBaseMult(r)
+
+ kh := sha512.New()
+ kh.Write(R.Bytes())
+ kh.Write(A.Bytes())
+ kh.Write(scramble)
+ hramDigest := kh.Sum(nil)
+ k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ S := k.MultiplyAdd(k, s, r)
+
+ return append(R.Bytes(), S.Bytes()...), nil
+}
+
func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
if err != nil {
@@ -290,8 +331,14 @@ func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
return enc, err
+ case "client_ed25519":
+ if len(authData) != 32 {
+ return nil, ErrMalformPkt
+ }
+ return authEd25519(authData, mc.cfg.Passwd)
+
default:
- errLog.Print("unknown auth plugin:", plugin)
+ mc.log("unknown auth plugin:", plugin)
return nil, ErrUnknownPlugin
}
}
@@ -338,7 +385,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
switch plugin {
- // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
case "caching_sha2_password":
switch len(authData) {
case 0:
@@ -346,7 +393,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
case 1:
switch authData[0] {
case cachingSha2PasswordFastAuthSuccess:
- if err = mc.readResultOK(); err == nil {
+ if err = mc.resultUnchanged().readResultOK(); err == nil {
return nil // auth successful
}
@@ -376,13 +423,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
}
if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpect resp from server for caching_sha2_password perform full authentication")
+ return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
}
// parse public key
block, rest := pem.Decode(data[1:])
if block == nil {
- return fmt.Errorf("No Pem data found, data: %s", rest)
+ return fmt.Errorf("no pem data found, data: %s", rest)
}
pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
@@ -397,7 +444,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
return err
}
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
default:
return ErrMalformPkt
@@ -426,7 +473,7 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
if err != nil {
return err
}
- return mc.readResultOK()
+ return mc.resultUnchanged().readResultOK()
}
default:
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 295bfbe52..1cdf97b67 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -9,7 +9,7 @@
package mysql
const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
+const binaryCollationID = 63
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index 947a883e3..eff978d93 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -23,10 +23,10 @@ import (
type mysqlConn struct {
buf buffer
netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- affectedRows uint64
- insertId uint64
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ result mysqlResult // managed by clearResult() and handleOkPacket().
cfg *Config
+ connector *connector
maxAllowedPacket int
maxWriteSize int
writeTimeout time.Duration
@@ -34,7 +34,6 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- reset bool // set when the Go SQL package calls ResetSession
// for context support (Go 1.8+)
watching bool
@@ -45,17 +44,27 @@ type mysqlConn struct {
closed atomicBool // set when conn is closed, before closech is closed
}
+// Helper function to call per-connection logger.
+func (mc *mysqlConn) log(v ...any) {
+ mc.cfg.Logger.Print(v...)
+}
+
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
+
for param, val := range mc.cfg.Params {
switch param {
// Charset: character_set_connection, character_set_client, character_set_results
case "charset":
charsets := strings.Split(val, ",")
- for i := range charsets {
+ for _, cs := range charsets {
// ignore errors here - a charset may not exist
- err = mc.exec("SET NAMES " + charsets[i])
+ if mc.cfg.Collation != "" {
+ err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
+ } else {
+ err = mc.exec("SET NAMES " + cs)
+ }
if err == nil {
break
}
@@ -68,7 +77,7 @@ func (mc *mysqlConn) handleParams() (err error) {
default:
if cmdSet.Len() == 0 {
// Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
cmdSet.WriteString("SET ")
} else {
cmdSet.WriteString(", ")
@@ -105,7 +114,7 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@@ -128,7 +137,7 @@ func (mc *mysqlConn) Close() (err error) {
}
mc.cleanup()
-
+ mc.clearResult()
return
}
@@ -143,12 +152,16 @@ func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
close(mc.closech)
- if mc.netConn == nil {
+ conn := mc.rawConn
+ if conn == nil {
return
}
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ if err := conn.Close(); err != nil {
+ mc.log(err)
}
+ // This function can be called from multiple goroutines.
+ // So we can not mc.clearResult() here.
+ // Caller should do it if they are in safe goroutine.
}
func (mc *mysqlConn) error() error {
@@ -163,14 +176,14 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
// STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- errLog.Print(err)
+ mc.log(err)
return nil, driver.ErrBadConn
}
@@ -204,7 +217,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return "", ErrInvalidConn
}
buf = buf[:0]
@@ -246,7 +259,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf = append(buf, "'0000-00-00'"...)
} else {
buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return "", err
}
@@ -296,7 +309,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -310,28 +323,25 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
}
query = prepared
}
- mc.affectedRows = 0
- mc.insertId = 0
err := mc.exec(query)
if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, err
+ copied := mc.result
+ return &copied, err
}
return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
+ handleOk := mc.clearResult()
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return mc.markBadConn(err)
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return err
}
@@ -348,7 +358,7 @@ func (mc *mysqlConn) exec(query string) error {
}
}
- return mc.discardResults()
+ return handleOk.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -356,8 +366,10 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ handleOk := mc.clearResult()
+
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@@ -376,7 +388,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
if err == nil {
// Read Result
var resLen int
- resLen, err = mc.readResultSetHeaderPacket()
+ resLen, err = handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -404,12 +416,13 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
// The returned byte slice is only valid until the next read
func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
// Send command
+ handleOk := mc.clearResult()
if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
return nil, err
}
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
@@ -451,7 +464,7 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ mc.log(ErrInvalidConn)
return driver.ErrBadConn
}
@@ -460,11 +473,12 @@ func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
}
defer mc.finish()
+ handleOk := mc.clearResult()
if err = mc.writeCommandPacket(comPing); err != nil {
return mc.markBadConn(err)
}
- return mc.readResultOK()
+ return handleOk.readResultOK()
}
// BeginTx implements driver.ConnBeginTx interface
@@ -639,7 +653,31 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error {
if mc.closed.Load() {
return driver.ErrBadConn
}
- mc.reset = true
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.cfg.CheckConnLiveness {
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
+ }
+ if err == nil {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ mc.log("closing bad idle connection: ", err)
+ return driver.ErrBadConn
+ }
+ }
+
return nil
}
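The liveness check above only runs when a pooled connection is reused, so bounding pool lifetimes (as the driver's docs recommend) keeps stale connections rare; illustrative database/sql settings:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Bounded lifetimes mean fewer stale connections for ResetSession to reject.
	db.SetConnMaxLifetime(3 * time.Minute)
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(10)
}
```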
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
index d567b4e4f..b67077596 100644
--- a/vendor/github.com/go-sql-driver/mysql/connector.go
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -12,10 +12,53 @@ import (
"context"
"database/sql/driver"
"net"
+ "os"
+ "strconv"
+ "strings"
)
type connector struct {
- cfg *Config // immutable private copy.
+ cfg *Config // immutable private copy.
+ encodedAttributes string // Encoded connection attributes.
+}
+
+func encodeConnectionAttributes(cfg *Config) string {
+ connAttrsBuf := make([]byte, 0)
+
+ // default connection attributes
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
+ serverHost, _, _ := net.SplitHostPort(cfg.Addr)
+ if serverHost != "" {
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
+ }
+
+ // user-defined connection attributes
+ for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
+ k, v, found := strings.Cut(connAttr, ":")
+ if !found {
+ continue
+ }
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
+ connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
+ }
+
+ return string(connAttrsBuf)
+}
+
+func newConnector(cfg *Config) *connector {
+ encodedAttributes := encodeConnectionAttributes(cfg)
+ return &connector{
+ cfg: cfg,
+ encodedAttributes: encodedAttributes,
+ }
}
// Connect implements driver.Connector interface.
@@ -23,12 +66,23 @@ type connector struct {
func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var err error
+ // Invoke beforeConnect if present, with a copy of the configuration
+ cfg := c.cfg
+ if c.cfg.beforeConnect != nil {
+ cfg = c.cfg.Clone()
+ err = c.cfg.beforeConnect(ctx, cfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
// New mysqlConn
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
closech: make(chan struct{}),
- cfg: c.cfg,
+ cfg: cfg,
+ connector: c,
}
mc.parseTime = mc.cfg.ParseTime
@@ -48,18 +102,15 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
}
-
if err != nil {
return nil, err
}
+ mc.rawConn = mc.netConn
// Enable TCP Keepalives on TCP connections
if tc, ok := mc.netConn.(*net.TCPConn); ok {
if err := tc.SetKeepAlive(true); err != nil {
- // Don't send COM_QUIT before handshake.
- mc.netConn.Close()
- mc.netConn = nil
- return nil, err
+ c.cfg.Logger.Print(err)
}
}
@@ -92,7 +143,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
authResp, err := mc.auth(authData, plugin)
if err != nil {
// try the default auth plugin, if using the requested plugin failed
- errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
plugin = defaultAuthPlugin
authResp, err = mc.auth(authData, plugin)
if err != nil {
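A minimal sketch of setting user-defined connection attributes alongside the defaults encoded above; the field values are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig()
	cfg.User = "app"
	cfg.Passwd = "secret"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "mydb"
	// Extra key:value pairs sent alongside the default _client_name, _os,
	// _platform, _pid and _server_host attributes.
	cfg.ConnectionAttributes = "program_name:my-service,region:eu-west-1"

	connector, err := mysql.NewConnector(cfg)
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector)
	defer db.Close()
}
```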
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index 64e2bced6..22526e031 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -8,12 +8,25 @@
package mysql
+import "runtime"
+
const (
defaultAuthPlugin = "mysql_native_password"
defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
+
+ // Connection attributes
+ // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
+ connAttrClientName = "_client_name"
+ connAttrClientNameValue = "Go-MySQL-Driver"
+ connAttrOS = "_os"
+ connAttrOSValue = runtime.GOOS
+ connAttrPlatform = "_platform"
+ connAttrPlatformValue = runtime.GOARCH
+ connAttrPid = "_pid"
+ connAttrServerHost = "_server_host"
)
// MySQL constants documentation:
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index ad7aec215..105316b81 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -55,6 +55,15 @@ func RegisterDialContext(net string, dial DialContextFunc) {
dials[net] = dial
}
+// DeregisterDialContext removes the custom dial function registered with the given net.
+func DeregisterDialContext(net string) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials != nil {
+ delete(dials, net)
+ }
+}
+
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
@@ -74,14 +83,18 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, err
}
- c := &connector{
- cfg: cfg,
- }
+ c := newConnector(cfg)
return c.Connect(context.Background())
}
+// This variable can be replaced with -ldflags like below:
+// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
+var driverName = "mysql"
+
func init() {
- sql.Register("mysql", &MySQLDriver{})
+ if driverName != "" {
+ sql.Register(driverName, &MySQLDriver{})
+ }
}
// NewConnector returns new driver.Connector.
@@ -92,7 +105,7 @@ func NewConnector(cfg *Config) (driver.Connector, error) {
if err := cfg.normalize(); err != nil {
return nil, err
}
- return &connector{cfg: cfg}, nil
+ return newConnector(cfg), nil
}
// OpenConnector implements driver.DriverContext.
@@ -101,7 +114,5 @@ func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
if err != nil {
return nil, err
}
- return &connector{
- cfg: cfg,
- }, nil
+ return newConnector(cfg), nil
}
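A hedged sketch of registering and later deregistering a custom dialer; the "mynet" network name and the addresses are made up for illustration:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"net"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Route connections for the custom "mynet" network through our own dialer.
	mysql.RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	})
	defer mysql.DeregisterDialContext("mynet") // new in this release: undo the registration

	db, err := sql.Open("mysql", "user:password@mynet(10.0.0.5:3306)/mydb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```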
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 4b71aaab0..65f5a0242 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -10,6 +10,7 @@ package mysql
import (
"bytes"
+ "context"
"crypto/rsa"
"crypto/tls"
"errors"
@@ -34,22 +35,27 @@ var (
// If a new Config is created instead of being parsed from a DSN string,
// the NewConfig function should be used, which sets default values.
type Config struct {
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network type
- Addr string // Network address (requires Net)
- DBName string // Database name
- Params map[string]string // Connection parameters
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- pubKey *rsa.PublicKey // Server public key
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
+ // non boolean fields
+
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
+ Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ TLSConfig string // TLS configuration name
+ TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+ Logger Logger // Logger
+
+ // boolean fields
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
@@ -63,17 +69,57 @@ type Config struct {
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
+
+ // unexported fields. new options should be added here
+
+ beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
+ pubKey *rsa.PublicKey // Server public key
+ timeTruncate time.Duration // Truncate time.Time values to the specified duration
}
+// Functional Options Pattern
+// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(*Config) error
+
// NewConfig creates a new Config and sets default values.
func NewConfig() *Config {
- return &Config{
- Collation: defaultCollation,
+ cfg := &Config{
Loc: time.UTC,
MaxAllowedPacket: defaultMaxAllowedPacket,
+ Logger: defaultLogger,
AllowNativePasswords: true,
CheckConnLiveness: true,
}
+
+ return cfg
+}
+
+// Apply applies the given options to the Config object.
+func (c *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TimeTruncate sets the time duration to truncate time.Time values in
+// query parameters.
+func TimeTruncate(d time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.timeTruncate = d
+ return nil
+ }
+}
+
+// BeforeConnect sets the function to be invoked before a connection is established.
+func BeforeConnect(fn func(context.Context, *Config) error) Option {
+ return func(cfg *Config) error {
+ cfg.beforeConnect = fn
+ return nil
+ }
}
func (cfg *Config) Clone() *Config {
@@ -97,7 +143,7 @@ func (cfg *Config) Clone() *Config {
}
func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
return errInvalidDSNUnsafeCollation
}
@@ -153,6 +199,10 @@ func (cfg *Config) normalize() error {
}
}
+ if cfg.Logger == nil {
+ cfg.Logger = defaultLogger
+ }
+
return nil
}
@@ -171,6 +221,8 @@ func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
// FormatDSN formats the given Config into a DSN string which can be passed to
// the driver.
+//
+// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
func (cfg *Config) FormatDSN() string {
var buf bytes.Buffer
@@ -196,7 +248,7 @@ func (cfg *Config) FormatDSN() string {
// /dbname
buf.WriteByte('/')
- buf.WriteString(cfg.DBName)
+ buf.WriteString(url.PathEscape(cfg.DBName))
// [?param1=value1&...¶mN=valueN]
hasParam := false
@@ -230,7 +282,7 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
- if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@@ -254,6 +306,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "parseTime", "true")
}
+ if cfg.timeTruncate > 0 {
+ writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
+ }
+
if cfg.ReadTimeout > 0 {
writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
}
@@ -358,7 +414,11 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
break
}
}
- cfg.DBName = dsn[i+1 : j]
+
+ dbname := dsn[i+1 : j]
+ if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
+ return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
+ }
break
}
@@ -378,13 +438,13 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
// Values must be url.QueryEscape'ed
func parseDSNParams(cfg *Config, params string) (err error) {
for _, v := range strings.Split(params, "&") {
- param := strings.SplitN(v, "=", 2)
- if len(param) != 2 {
+ key, value, found := strings.Cut(v, "=")
+ if !found {
continue
}
// cfg params
- switch value := param[1]; param[0] {
+ switch key {
// Disable INFILE allowlist / enable all files
case "allowAllFiles":
var isBool bool
@@ -490,6 +550,13 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // time.Time truncation
+ case "timeTruncate":
+ cfg.timeTruncate, err = time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
+ }
+
// I/O read Timeout
case "readTimeout":
cfg.ReadTimeout, err = time.ParseDuration(value)
@@ -554,13 +621,22 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
+
+ // Connection attributes
+ case "connectionAttributes":
+ connectionAttributes, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid connectionAttributes value: %v", err)
+ }
+ cfg.ConnectionAttributes = connectionAttributes
+
default:
// lazy init
if cfg.Params == nil {
cfg.Params = make(map[string]string)
}
- if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
return
}
}
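
A minimal sketch, with hypothetical connection details, of building a Config directly and applying the new functional options before opening the pool via NewConnector and database/sql.OpenDB, as the FormatDSN note above suggests:

    package main

    import (
        "context"
        "database/sql"
        "log"
        "time"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        cfg := mysql.NewConfig()
        cfg.User = "user"
        cfg.Passwd = "password"
        cfg.Net = "tcp"
        cfg.Addr = "127.0.0.1:3306"
        cfg.DBName = "test"

        // Apply the new functional options.
        err := cfg.Apply(
            mysql.TimeTruncate(time.Microsecond), // truncate time.Time query parameters
            mysql.BeforeConnect(func(ctx context.Context, c *mysql.Config) error {
                // e.g. refresh a short-lived credential before each new connection
                return nil
            }),
        )
        if err != nil {
            log.Fatal(err)
        }

        connector, err := mysql.NewConnector(cfg)
        if err != nil {
            log.Fatal(err)
        }
        db := sql.OpenDB(connector)
        defer db.Close()
    }
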
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index ff9a8f088..a7ef88909 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -21,7 +21,7 @@ var (
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
@@ -37,20 +37,26 @@ var (
errBadConnNoWrite = errors.New("bad connection")
)
-var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
// Logger is used to log critical error messages.
type Logger interface {
- Print(v ...interface{})
+ Print(v ...any)
}
-// SetLogger is used to set the logger for critical errors.
+// NopLogger is a nop implementation of the Logger interface.
+type NopLogger struct{}
+
+// Print implements Logger interface.
+func (nl *NopLogger) Print(_ ...any) {}
+
+// SetLogger is used to set the default logger for critical errors.
// The initial logger is os.Stderr.
func SetLogger(logger Logger) error {
if logger == nil {
return errors.New("logger is nil")
}
- errLog = logger
+ defaultLogger = logger
return nil
}
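
A brief sketch of the logger plumbing: SetLogger swaps the package-level default, and the new NopLogger discards driver messages entirely:

    package main

    import (
        "log"
        "os"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        // Route driver errors through a custom logger...
        _ = mysql.SetLogger(log.New(os.Stderr, "[mysql] ", log.LstdFlags))

        // ...or discard them completely with the new NopLogger.
        if err := mysql.SetLogger(&mysql.NopLogger{}); err != nil {
            log.Fatal(err)
        }
    }
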
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
index e0654a83d..286084247 100644
--- a/vendor/github.com/go-sql-driver/mysql/fields.go
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -18,7 +18,7 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeBit:
return "BIT"
case fieldTypeBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TEXT"
}
return "BLOB"
@@ -37,6 +37,9 @@ func (mf *mysqlField) typeDatabaseName() string {
case fieldTypeGeometry:
return "GEOMETRY"
case fieldTypeInt24:
+ if mf.flags&flagUnsigned != 0 {
+ return "UNSIGNED MEDIUMINT"
+ }
return "MEDIUMINT"
case fieldTypeJSON:
return "JSON"
@@ -46,7 +49,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "INT"
case fieldTypeLongBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "LONGTEXT"
}
return "LONGBLOB"
@@ -56,7 +59,7 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "BIGINT"
case fieldTypeMediumBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "MEDIUMTEXT"
}
return "MEDIUMBLOB"
@@ -74,7 +77,12 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "SMALLINT"
case fieldTypeString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.flags&flagEnum != 0 {
+ return "ENUM"
+ } else if mf.flags&flagSet != 0 {
+ return "SET"
+ }
+ if mf.charSet == binaryCollationID {
return "BINARY"
}
return "CHAR"
@@ -88,17 +96,17 @@ func (mf *mysqlField) typeDatabaseName() string {
}
return "TINYINT"
case fieldTypeTinyBLOB:
- if mf.charSet != collations[binaryCollation] {
+ if mf.charSet != binaryCollationID {
return "TINYTEXT"
}
return "TINYBLOB"
case fieldTypeVarChar:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
case fieldTypeVarString:
- if mf.charSet == collations[binaryCollation] {
+ if mf.charSet == binaryCollationID {
return "VARBINARY"
}
return "VARCHAR"
@@ -110,21 +118,23 @@ func (mf *mysqlField) typeDatabaseName() string {
}
var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
- scanTypeUnknown = reflect.TypeOf(new(interface{}))
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeString = reflect.TypeOf("")
+ scanTypeNullString = reflect.TypeOf(sql.NullString{})
+ scanTypeBytes = reflect.TypeOf([]byte{})
+ scanTypeUnknown = reflect.TypeOf(new(any))
)
type mysqlField struct {
@@ -187,12 +197,18 @@ func (mf *mysqlField) scanType() reflect.Type {
}
return scanTypeNullFloat
+ case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
+ fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
+ if mf.charSet == binaryCollationID {
+ return scanTypeBytes
+ }
+ fallthrough
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
- fieldTypeTime:
- return scanTypeRawBytes
+ fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeString
+ }
+ return scanTypeNullString
case fieldTypeDate, fieldTypeNewDate,
fieldTypeTimestamp, fieldTypeDateTime:
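
With the scanType changes above, ColumnType.ScanType reports string, sql.NullString, or []byte (and unsigned-aware integers) rather than sql.RawBytes. A sketch, assuming an existing *sql.DB and a hypothetical items table:

    package example

    import (
        "database/sql"
        "fmt"
    )

    // printScanTypes lists the Go types the driver advertises for each column.
    func printScanTypes(db *sql.DB) error {
        rows, err := db.Query("SELECT id, name, payload FROM items") // hypothetical table
        if err != nil {
            return err
        }
        defer rows.Close()

        cols, err := rows.ColumnTypes()
        if err != nil {
            return err
        }
        for _, c := range cols {
            // e.g. a NOT NULL VARCHAR reports string, a nullable one sql.NullString,
            // and a VARBINARY/BLOB column reports []byte.
            fmt.Printf("%s: %s\n", c.Name(), c.ScanType())
        }
        return rows.Err()
    }
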
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
deleted file mode 100644
index 3a4ec25a9..000000000
--- a/vendor/github.com/go-sql-driver/mysql/fuzz.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build gofuzz
-// +build gofuzz
-
-package mysql
-
-import (
- "database/sql"
-)
-
-func Fuzz(data []byte) int {
- db, err := sql.Open("mysql", string(data))
- if err != nil {
- return 0
- }
- db.Close()
- return 1
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 3279dcffd..0c8af9f11 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -93,7 +93,7 @@ func deferredClose(err *error, closer io.Closer) {
const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
packetSize := defaultPacketSize
@@ -116,10 +116,10 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
defer deferredClose(&err, cl)
}
} else {
- err = fmt.Errorf("Reader '%s' is ", name)
+ err = fmt.Errorf("reader '%s' is ", name)
}
} else {
- err = fmt.Errorf("Reader '%s' is not registered", name)
+ err = fmt.Errorf("reader '%s' is not registered", name)
}
} else { // File
name = strings.Trim(name, `"`)
@@ -154,7 +154,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
for err == nil {
n, err = rdr.Read(data[4:])
if n > 0 {
- if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
return ioErr
}
}
@@ -168,7 +168,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
if data == nil {
data = make([]byte, 4)
}
- if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
@@ -177,6 +177,6 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
return mc.readResultOK()
}
- mc.readPacket()
+ mc.conn().readPacket()
return err
}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
index 36c8a42c5..316a48aae 100644
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -38,7 +38,7 @@ type NullTime sql.NullTime
// Scan implements the Scanner interface.
// The value type must be time.Time or string / []byte (formatted time-string),
// otherwise Scan fails.
-func (nt *NullTime) Scan(value interface{}) (err error) {
+func (nt *NullTime) Scan(value any) (err error) {
if value == nil {
nt.Time, nt.Valid = time.Time{}, false
return
@@ -59,7 +59,7 @@ func (nt *NullTime) Scan(value interface{}) (err error) {
}
nt.Valid = false
- return fmt.Errorf("Can't convert %T to time.Time", value)
+ return fmt.Errorf("can't convert %T to time.Time", value)
}
// Value implements the driver Valuer interface.
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index ee05c95a8..90a34728b 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -14,10 +14,10 @@ import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
- "errors"
"fmt"
"io"
"math"
+ "strconv"
"time"
)
@@ -34,7 +34,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -44,6 +44,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// check packet sync [8 bit]
if data[3] != mc.sequence {
+ mc.Close()
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
}
@@ -56,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if pktLen == 0 {
// there was no previous packet
if prevData == nil {
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
mc.Close()
return nil, ErrInvalidConn
}
@@ -70,7 +71,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
- errLog.Print(err)
+ mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
@@ -97,34 +98,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return ErrPktTooLarge
}
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.reset {
- mc.reset = false
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.CheckConnLiveness {
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- }
- if err != nil {
- errLog.Print("closing bad idle connection: ", err)
- mc.Close()
- return driver.ErrBadConn
- }
- }
-
for {
var size int
if pktLen >= maxPacketSize {
@@ -161,7 +134,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
mc.cleanup()
- errLog.Print(ErrMalformPkt)
+ mc.log(ErrMalformPkt)
} else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
@@ -171,7 +144,7 @@ func (mc *mysqlConn) writePacket(data []byte) error {
return errBadConnNoWrite
}
mc.cleanup()
- errLog.Print(err)
+ mc.log(err)
}
return ErrInvalidConn
}
@@ -239,7 +212,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
// reserved (all [00]) [10 bytes]
pos += 1 + 2 + 2 + 1 + 10
- // second part of the password cipher [mininum 13 bytes],
+ // second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
//
// The web documentation is ambiguous about the length. However,
@@ -285,6 +258,7 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
+ clientConnectAttrs |
mc.flags&clientLongFlag
if mc.cfg.ClientFoundRows {
@@ -318,11 +292,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pktLen += n + 1
}
+ // encode length of the connection attributes
+ var connAttrsLEIBuf [9]byte
+ connAttrsLen := len(mc.connector.encodedAttributes)
+ connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
+ pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
+
// Calculate packet length and get buffer with that size
- data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -338,14 +318,18 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[10] = 0x00
data[11] = 0x00
- // Charset [1 byte]
+ // Collation ID [1 byte]
+ cname := mc.cfg.Collation
+ if cname == "" {
+ cname = defaultCollation
+ }
var found bool
- data[12], found = collations[mc.cfg.Collation]
+ data[12], found = collations[cname]
if !found {
// Note possibility for false negatives:
// could be triggered although the collation is valid if the
// collations map does not contain entries the server supports.
- return errors.New("unknown collation")
+ return fmt.Errorf("unknown collation: %q", cname)
}
// Filler [23 bytes] (all 0x00)
@@ -367,7 +351,6 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
if err := tlsConn.Handshake(); err != nil {
return err
}
- mc.rawConn = mc.netConn
mc.netConn = tlsConn
mc.buf.nc = tlsConn
}
@@ -394,6 +377,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
data[pos] = 0x00
pos++
+ // Connection Attributes
+ pos += copy(data[pos:], connAttrsLEI)
+ pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
+
// Send Auth packet
return mc.writePacket(data[:pos])
}
@@ -404,7 +391,7 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
data, err := mc.buf.takeSmallBuffer(pktLen)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -424,7 +411,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -443,7 +430,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -464,7 +451,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -495,7 +482,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
switch data[0] {
case iOK:
- return nil, "", mc.handleOkPacket(data)
+ // resultUnchanged, since auth happens before any queries or
+ // commands have been executed.
+ return nil, "", mc.resultUnchanged().handleOkPacket(data)
case iAuthMoreData:
return data[1:], "", err
@@ -518,9 +507,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
}
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
- data, err := mc.readPacket()
+// Returns error if Packet is not a 'Result OK'-Packet
+func (mc *okHandler) readResultOK() error {
+ data, err := mc.conn().readPacket()
if err != nil {
return err
}
@@ -528,13 +517,17 @@ func (mc *mysqlConn) readResultOK() error {
if data[0] == iOK {
return mc.handleOkPacket(data)
}
- return mc.handleErrorPacket(data)
+ return mc.conn().handleErrorPacket(data)
}
// Result Set Header Packet
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
- data, err := mc.readPacket()
+func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
+ // handleOkPacket replaces both values; other cases leave the values unchanged.
+ mc.result.affectedRows = append(mc.result.affectedRows, 0)
+ mc.result.insertIds = append(mc.result.insertIds, 0)
+
+ data, err := mc.conn().readPacket()
if err == nil {
switch data[0] {
@@ -542,19 +535,16 @@ func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
return 0, mc.handleOkPacket(data)
case iERR:
- return 0, mc.handleErrorPacket(data)
+ return 0, mc.conn().handleErrorPacket(data)
case iLocalInFile:
return 0, mc.handleInFileRequest(string(data[1:]))
}
// column count
- num, _, n := readLengthEncodedInteger(data)
- if n-len(data) == 0 {
- return int(num), nil
- }
-
- return 0, ErrMalformPkt
+ num, _, _ := readLengthEncodedInteger(data)
+ // ignore remaining data in the packet. see #1478.
+ return int(num), nil
}
return 0, err
}
@@ -607,18 +597,61 @@ func readStatus(b []byte) statusFlag {
return statusFlag(b[0]) | statusFlag(b[1])<<8
}
+// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
+// need to be cleared first (e.g. during authentication, or while additional
+// resultsets are being fetched.)
+func (mc *mysqlConn) resultUnchanged() *okHandler {
+ return (*okHandler)(mc)
+}
+
+// okHandler represents the state of the connection when mysqlConn.result has
+// been prepared for processing of OK packets.
+//
+// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
+// callpaths must either:
+//
+// 1. first clear it using clearResult(), or
+// 2. confirm that they don't need to (by calling resultUnchanged()).
+//
+// Both return an instance of type *okHandler.
+type okHandler mysqlConn
+
+// Exposes the underlying type's methods.
+func (mc *okHandler) conn() *mysqlConn {
+ return (*mysqlConn)(mc)
+}
+
+// clearResult clears the connection's stored affectedRows and insertIds
+// fields.
+//
+// It returns a handler that can process OK responses.
+func (mc *mysqlConn) clearResult() *okHandler {
+ mc.result = mysqlResult{}
+ return (*okHandler)(mc)
+}
+
// Ok Packet
// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *mysqlConn) handleOkPacket(data []byte) error {
+func (mc *okHandler) handleOkPacket(data []byte) error {
var n, m int
+ var affectedRows, insertId uint64
// 0x00 [1 byte]
// Affected rows [Length Coded Binary]
- mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+ affectedRows, _, n = readLengthEncodedInteger(data[1:])
// Insert id [Length Coded Binary]
- mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+ insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // Update for the current statement result (only used by
+ // readResultSetHeaderPacket).
+ if len(mc.result.affectedRows) > 0 {
+ mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
+ }
+ if len(mc.result.insertIds) > 0 {
+ mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
+ }
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
@@ -769,7 +802,8 @@ func (rows *textRows) readRow(dest []driver.Value) error {
for i := range dest {
// Read bytes and convert to string
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ var buf []byte
+ buf, isNull, n, err = readLengthEncodedString(data[pos:])
pos += n
if err != nil {
@@ -781,19 +815,40 @@ func (rows *textRows) readRow(dest []driver.Value) error {
continue
}
- if !mc.parseTime {
- continue
- }
-
- // Parse time field
switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp,
fieldTypeDateTime,
fieldTypeDate,
fieldTypeNewDate:
- if dest[i], err = parseDateTime(dest[i].([]byte), mc.cfg.Loc); err != nil {
- return err
+ if mc.parseTime {
+ dest[i], err = parseDateTime(buf, mc.cfg.Loc)
+ } else {
+ dest[i] = buf
}
+
+ case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i], err = strconv.ParseUint(string(buf), 10, 64)
+ } else {
+ dest[i], err = strconv.ParseInt(string(buf), 10, 64)
+ }
+
+ case fieldTypeFloat:
+ var d float64
+ d, err = strconv.ParseFloat(string(buf), 32)
+ dest[i] = float32(d)
+
+ case fieldTypeDouble:
+ dest[i], err = strconv.ParseFloat(string(buf), 64)
+
+ default:
+ dest[i] = buf
+ }
+ if err != nil {
+ return err
}
}
@@ -938,7 +993,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
@@ -1116,7 +1171,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if v.IsZero() {
b = append(b, "0000-00-00"...)
} else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
if err != nil {
return err
}
@@ -1137,7 +1192,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
if err = mc.buf.store(data); err != nil {
- errLog.Print(err)
+ mc.log(err)
return errBadConnNoWrite
}
}
@@ -1149,7 +1204,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
return mc.writePacket(data)
}
-func (mc *mysqlConn) discardResults() error {
+// For each remaining resultset in the stream, discards its rows and updates
+// mc.result.affectedRows and mc.result.insertIds.
+func (mc *okHandler) discardResults() error {
for mc.status&statusMoreResultsExists != 0 {
resLen, err := mc.readResultSetHeaderPacket()
if err != nil {
@@ -1157,11 +1214,11 @@ func (mc *mysqlConn) discardResults() error {
}
if resLen > 0 {
// columns
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
// rows
- if err := mc.readUntilEOF(); err != nil {
+ if err := mc.conn().readUntilEOF(); err != nil {
return err
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
index c6438d034..d51631468 100644
--- a/vendor/github.com/go-sql-driver/mysql/result.go
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -8,15 +8,43 @@
package mysql
+import "database/sql/driver"
+
+// Result exposes data not available through *connection.Result.
+//
+// This is accessible by executing statements using sql.Conn.Raw() and
+// downcasting the returned result:
+//
+// res, err := rawConn.Exec(...)
+// res.(mysql.Result).AllRowsAffected()
+type Result interface {
+ driver.Result
+ // AllRowsAffected returns a slice containing the affected rows for each
+ // executed statement.
+ AllRowsAffected() []int64
+ // AllLastInsertIds returns a slice containing the last inserted ID for each
+ // executed statement.
+ AllLastInsertIds() []int64
+}
+
type mysqlResult struct {
- affectedRows int64
- insertId int64
+ // One entry in both slices is created for every executed statement result.
+ affectedRows []int64
+ insertIds []int64
}
func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertId, nil
+ return res.insertIds[len(res.insertIds)-1], nil
}
func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows, nil
+ return res.affectedRows[len(res.affectedRows)-1], nil
+}
+
+func (res *mysqlResult) AllLastInsertIds() []int64 {
+ return append([]int64{}, res.insertIds...) // defensive copy
+}
+
+func (res *mysqlResult) AllRowsAffected() []int64 {
+ return append([]int64{}, res.affectedRows...) // defensive copy
}
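
The Result interface is reached by downcasting a driver-level result, for example via sql.Conn.Raw. A sketch that assumes multiStatements=true in the DSN and that the raw connection implements driver.ExecerContext:

    package example

    import (
        "context"
        "database/sql"
        "database/sql/driver"
        "fmt"

        "github.com/go-sql-driver/mysql"
    )

    // allAffected executes a (possibly multi-statement) query on a raw driver
    // connection and returns the affected-row count of every statement.
    func allAffected(ctx context.Context, db *sql.DB, query string) ([]int64, error) {
        conn, err := db.Conn(ctx)
        if err != nil {
            return nil, err
        }
        defer conn.Close()

        var counts []int64
        err = conn.Raw(func(driverConn any) error {
            ex, ok := driverConn.(driver.ExecerContext)
            if !ok {
                return fmt.Errorf("driver connection does not implement driver.ExecerContext")
            }
            res, err := ex.ExecContext(ctx, query, nil)
            if err != nil {
                return err
            }
            mysqlRes, ok := res.(mysql.Result)
            if !ok {
                return fmt.Errorf("unexpected result type %T", res)
            }
            counts = mysqlRes.AllRowsAffected() // one entry per executed statement
            return nil
        })
        return counts, err
    }
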
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index 888bdb5f0..81fa6062c 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -123,7 +123,8 @@ func (rows *mysqlRows) Close() (err error) {
err = mc.readUntilEOF()
}
if err == nil {
- if err = mc.discardResults(); err != nil {
+ handleOk := mc.clearResult()
+ if err = handleOk.discardResults(); err != nil {
return err
}
}
@@ -160,7 +161,15 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
return 0, io.EOF
}
rows.rs = resultSet{}
- return rows.mc.readResultSetHeaderPacket()
+ // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
+ // nextResultSet.
+ resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
+ if err != nil {
+ // Clear the multi-results status flag
+ rows.rs.done = true
+ rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
+ }
+ return resLen, err
}
func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 10ece8bd6..0436f2240 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -51,7 +51,7 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -61,12 +61,10 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
mc := stmt.mc
-
- mc.affectedRows = 0
- mc.insertId = 0
+ handleOk := stmt.mc.clearResult()
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -83,14 +81,12 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
}
- if err := mc.discardResults(); err != nil {
+ if err := handleOk.discardResults(); err != nil {
return nil, err
}
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+ copied := mc.result
+ return &copied, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -99,7 +95,7 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
- errLog.Print(ErrInvalidConn)
+ stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@@ -111,7 +107,8 @@ func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
mc := stmt.mc
// Read Result
- resLen, err := mc.readResultSetHeaderPacket()
+ handleOk := stmt.mc.clearResult()
+ resLen, err := handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
@@ -144,7 +141,7 @@ type converter struct{}
// implementation does not. This function should be kept in sync with
// database/sql/driver defaultConverter.ConvertValue() except for that
// deliberate difference.
-func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+func (c converter) ConvertValue(v any) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index 15dbd8d16..cda24fe74 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -36,7 +36,7 @@ var (
// registering it.
//
// rootCertPool := x509.NewCertPool()
-// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// pem, err := os.ReadFile("/path/ca-cert.pem")
// if err != nil {
// log.Fatal(err)
// }
@@ -265,7 +265,11 @@ func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Va
return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
}
-func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
+ if timeTruncate > 0 {
+ t = t.Truncate(timeTruncate)
+ }
+
year, month, day := t.Date()
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
@@ -616,6 +620,11 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}
+func appendLengthEncodedString(b []byte, s string) []byte {
+ b = appendLengthEncodedInteger(b, uint64(len(s)))
+ return append(b, s...)
+}
+
// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
// If cap(buf) is not enough, reallocate new buffer.
func reserveBuffer(buf []byte, appendSize int) []byte {
diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.editorconfig
rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig
diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.gitattributes
rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes
diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/.gitignore
rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore
diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
similarity index 95%
rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md
rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
index 61d8ebffc..2ce45dd4e 100644
--- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md
+++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
## Release 3.2.0 (2020-12-14)
### Added
diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt
rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md
similarity index 88%
rename from vendor/github.com/go-task/slim-sprig/README.md
rename to vendor/github.com/go-task/slim-sprig/v3/README.md
index 72579471f..b5ab56425 100644
--- a/vendor/github.com/go-task/slim-sprig/README.md
+++ b/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -1,4 +1,4 @@
-# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)
+# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
all functions that depend on external (non standard library) or crypto packages
diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
similarity index 89%
rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml
rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
index cdcfd223b..8e6346bb1 100644
--- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml
+++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -1,6 +1,6 @@
# https://taskfile.dev
-version: '2'
+version: '3'
tasks:
default:
diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/crypto.go
rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go
diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/date.go
rename to vendor/github.com/go-task/slim-sprig/v3/date.go
diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/defaults.go
rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go
diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/dict.go
rename to vendor/github.com/go-task/slim-sprig/v3/dict.go
diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/doc.go
rename to vendor/github.com/go-task/slim-sprig/v3/doc.go
diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/functions.go
rename to vendor/github.com/go-task/slim-sprig/v3/functions.go
diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/list.go
rename to vendor/github.com/go-task/slim-sprig/v3/list.go
diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/network.go
rename to vendor/github.com/go-task/slim-sprig/v3/network.go
diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/numeric.go
rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go
diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/reflect.go
rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go
diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/regex.go
rename to vendor/github.com/go-task/slim-sprig/v3/regex.go
diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/strings.go
rename to vendor/github.com/go-task/slim-sprig/v3/strings.go
diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go
similarity index 100%
rename from vendor/github.com/go-task/slim-sprig/url.go
rename to vendor/github.com/go-task/slim-sprig/v3/url.go
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/google/btree/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
index eab5dbf7b..6062a4dac 100644
--- a/vendor/github.com/google/btree/README.md
+++ b/vendor/github.com/google/btree/README.md
@@ -1,5 +1,7 @@
# BTree implementation for Go
+![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
+
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
index 969b910d7..b83acdbc6 100644
--- a/vendor/github.com/google/btree/btree.go
+++ b/vendor/github.com/google/btree/btree.go
@@ -12,9 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !go1.18
-// +build !go1.18
-
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go
deleted file mode 100644
index e44a0f488..000000000
--- a/vendor/github.com/google/btree/btree_generic.go
+++ /dev/null
@@ -1,1083 +0,0 @@
-// Copyright 2014-2022 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.18
-// +build go1.18
-
-// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific
-// instantiation of that generic for the Item interface, with a backwards-
-// compatible API. Before go1.18, generics are not supported,
-// and BTree is just an implementation based around the Item interface.
-
-// Package btree implements in-memory B-Trees of arbitrary degree.
-//
-// btree implements an in-memory B-Tree for use as an ordered data structure.
-// It is not meant for persistent storage solutions.
-//
-// It has a flatter structure than an equivalent red-black or other binary tree,
-// which in some cases yields better memory usage and/or performance.
-// See some discussion on the matter here:
-// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
-// Note, though, that this project is in no way related to the C++ B-Tree
-// implementation written about there.
-//
-// Within this tree, each node contains a slice of items and a (possibly nil)
-// slice of children. For basic numeric values or raw structs, this can cause
-// efficiency differences when compared to equivalent C++ template code that
-// stores values in arrays within the node:
-// * Due to the overhead of storing values as interfaces (each
-// value needs to be stored as the value itself, then 2 words for the
-// interface pointing to that value and its type), resulting in higher
-// memory use.
-// * Since interfaces can point to values anywhere in memory, values are
-// most likely not stored in contiguous blocks, resulting in a higher
-// number of cache misses.
-// These issues don't tend to matter, though, when working with strings or other
-// heap-allocated structures, since C++-equivalent structures also must store
-// pointers and also distribute their values across the heap.
-//
-// This implementation is designed to be a drop-in replacement to gollrb.LLRB
-// trees, (http://github.com/petar/gollrb), an excellent and probably the most
-// widely used ordered tree implementation in the Go ecosystem currently.
-// Its functions, therefore, exactly mirror those of
-// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
-// support storing multiple equivalent values.
-//
-// There are two implementations; those suffixed with 'G' are generics, usable
-// for any type, and require a passed-in "less" function to define their ordering.
-// Those without this prefix are specific to the 'Item' interface, and use
-// its 'Less' function for ordering.
-package btree
-
-import (
- "fmt"
- "io"
- "sort"
- "strings"
- "sync"
-)
-
-// Item represents a single object in the tree.
-type Item interface {
- // Less tests whether the current item is less than the given argument.
- //
- // This must provide a strict weak ordering.
- // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
- // hold one of either a or b in the tree).
- Less(than Item) bool
-}
-
-const (
- DefaultFreeListSize = 32
-)
-
-// FreeListG represents a free list of btree nodes. By default each
-// BTree has its own FreeList, but multiple BTrees can share the same
-// FreeList, in particular when they're created with Clone.
-// Two Btrees using the same freelist are safe for concurrent write access.
-type FreeListG[T any] struct {
- mu sync.Mutex
- freelist []*node[T]
-}
-
-// NewFreeListG creates a new free list.
-// size is the maximum size of the returned free list.
-func NewFreeListG[T any](size int) *FreeListG[T] {
- return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
-}
-
-func (f *FreeListG[T]) newNode() (n *node[T]) {
- f.mu.Lock()
- index := len(f.freelist) - 1
- if index < 0 {
- f.mu.Unlock()
- return new(node[T])
- }
- n = f.freelist[index]
- f.freelist[index] = nil
- f.freelist = f.freelist[:index]
- f.mu.Unlock()
- return
-}
-
-func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
- f.mu.Lock()
- if len(f.freelist) < cap(f.freelist) {
- f.freelist = append(f.freelist, n)
- out = true
- }
- f.mu.Unlock()
- return
-}
-
-// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
-// the tree. When this function returns false, iteration will stop and the
-// associated Ascend* function will immediately return.
-type ItemIteratorG[T any] func(item T) bool
-
-// Ordered represents the set of types for which the '<' operator work.
-type Ordered interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
-}
-
-// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
-func Less[T Ordered]() LessFunc[T] {
- return func(a, b T) bool { return a < b }
-}
-
-// NewOrderedG creates a new B-Tree for ordered types.
-func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
- return NewG[T](degree, Less[T]())
-}
-
-// NewG creates a new B-Tree with the given degree.
-//
-// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
-// and 2-4 children).
-//
-// The passed-in LessFunc determines how objects of type T are ordered.
-func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
- return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
-}
-
-// NewWithFreeListG creates a new B-Tree that uses the given node free list.
-func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
- if degree <= 1 {
- panic("bad degree")
- }
- return &BTreeG[T]{
- degree: degree,
- cow: &copyOnWriteContext[T]{freelist: f, less: less},
- }
-}
-
-// items stores items in a node.
-type items[T any] []T
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *items[T]) insertAt(index int, item T) {
- var zero T
- *s = append(*s, zero)
- if index < len(*s) {
- copy((*s)[index+1:], (*s)[index:])
- }
- (*s)[index] = item
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *items[T]) removeAt(index int) T {
- item := (*s)[index]
- copy((*s)[index:], (*s)[index+1:])
- var zero T
- (*s)[len(*s)-1] = zero
- *s = (*s)[:len(*s)-1]
- return item
-}
-
-// pop removes and returns the last element in the list.
-func (s *items[T]) pop() (out T) {
- index := len(*s) - 1
- out = (*s)[index]
- var zero T
- (*s)[index] = zero
- *s = (*s)[:index]
- return
-}
-
-// truncate truncates this instance at index so that it contains only the
-// first index items. index must be less than or equal to length.
-func (s *items[T]) truncate(index int) {
- var toClear items[T]
- *s, toClear = (*s)[:index], (*s)[index:]
- var zero T
- for i := 0; i < len(toClear); i++ {
- toClear[i] = zero
- }
-}
-
-// find returns the index where the given item should be inserted into this
-// list. 'found' is true if the item already exists in the list at the given
-// index.
-func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) {
- i := sort.Search(len(s), func(i int) bool {
- return less(item, s[i])
- })
- if i > 0 && !less(s[i-1], item) {
- return i - 1, true
- }
- return i, false
-}
-
-// node is an internal node in a tree.
-//
-// It must at all times maintain the invariant that either
-// * len(children) == 0, len(items) unconstrained
-// * len(children) == len(items) + 1
-type node[T any] struct {
- items items[T]
- children items[*node[T]]
- cow *copyOnWriteContext[T]
-}
-
-func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] {
- if n.cow == cow {
- return n
- }
- out := cow.newNode()
- if cap(out.items) >= len(n.items) {
- out.items = out.items[:len(n.items)]
- } else {
- out.items = make(items[T], len(n.items), cap(n.items))
- }
- copy(out.items, n.items)
- // Copy children
- if cap(out.children) >= len(n.children) {
- out.children = out.children[:len(n.children)]
- } else {
- out.children = make(items[*node[T]], len(n.children), cap(n.children))
- }
- copy(out.children, n.children)
- return out
-}
-
-func (n *node[T]) mutableChild(i int) *node[T] {
- c := n.children[i].mutableFor(n.cow)
- n.children[i] = c
- return c
-}
-
-// split splits the given node at the given index. The current node shrinks,
-// and this function returns the item that existed at that index and a new node
-// containing all items/children after it.
-func (n *node[T]) split(i int) (T, *node[T]) {
- item := n.items[i]
- next := n.cow.newNode()
- next.items = append(next.items, n.items[i+1:]...)
- n.items.truncate(i)
- if len(n.children) > 0 {
- next.children = append(next.children, n.children[i+1:]...)
- n.children.truncate(i + 1)
- }
- return item, next
-}
-
-// maybeSplitChild checks if a child should be split, and if so splits it.
-// Returns whether or not a split occurred.
-func (n *node[T]) maybeSplitChild(i, maxItems int) bool {
- if len(n.children[i].items) < maxItems {
- return false
- }
- first := n.mutableChild(i)
- item, second := first.split(maxItems / 2)
- n.items.insertAt(i, item)
- n.children.insertAt(i+1, second)
- return true
-}
-
-// insert inserts an item into the subtree rooted at this node, making sure
-// no nodes in the subtree exceed maxItems items. Should an equivalent item be
-// be found/replaced by insert, it will be returned.
-func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
- i, found := n.items.find(item, n.cow.less)
- if found {
- out := n.items[i]
- n.items[i] = item
- return out, true
- }
- if len(n.children) == 0 {
- n.items.insertAt(i, item)
- return
- }
- if n.maybeSplitChild(i, maxItems) {
- inTree := n.items[i]
- switch {
- case n.cow.less(item, inTree):
- // no change, we want first split node
- case n.cow.less(inTree, item):
- i++ // we want second split node
- default:
- out := n.items[i]
- n.items[i] = item
- return out, true
- }
- }
- return n.mutableChild(i).insert(item, maxItems)
-}
-
-// get finds the given key in the subtree and returns it.
-func (n *node[T]) get(key T) (_ T, _ bool) {
- i, found := n.items.find(key, n.cow.less)
- if found {
- return n.items[i], true
- } else if len(n.children) > 0 {
- return n.children[i].get(key)
- }
- return
-}
-
-// min returns the first item in the subtree.
-func min[T any](n *node[T]) (_ T, found bool) {
- if n == nil {
- return
- }
- for len(n.children) > 0 {
- n = n.children[0]
- }
- if len(n.items) == 0 {
- return
- }
- return n.items[0], true
-}
-
-// max returns the last item in the subtree.
-func max[T any](n *node[T]) (_ T, found bool) {
- if n == nil {
- return
- }
- for len(n.children) > 0 {
- n = n.children[len(n.children)-1]
- }
- if len(n.items) == 0 {
- return
- }
- return n.items[len(n.items)-1], true
-}
-
-// toRemove details what item to remove in a node.remove call.
-type toRemove int
-
-const (
- removeItem toRemove = iota // removes the given item
- removeMin // removes smallest item in the subtree
- removeMax // removes largest item in the subtree
-)
-
-// remove removes an item from the subtree rooted at this node.
-func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
- var i int
- var found bool
- switch typ {
- case removeMax:
- if len(n.children) == 0 {
- return n.items.pop(), true
- }
- i = len(n.items)
- case removeMin:
- if len(n.children) == 0 {
- return n.items.removeAt(0), true
- }
- i = 0
- case removeItem:
- i, found = n.items.find(item, n.cow.less)
- if len(n.children) == 0 {
- if found {
- return n.items.removeAt(i), true
- }
- return
- }
- default:
- panic("invalid type")
- }
- // If we get to here, we have children.
- if len(n.children[i].items) <= minItems {
- return n.growChildAndRemove(i, item, minItems, typ)
- }
- child := n.mutableChild(i)
- // Either we had enough items to begin with, or we've done some
- // merging/stealing, because we've got enough now and we're ready to return
- // stuff.
- if found {
- // The item exists at index 'i', and the child we've selected can give us a
- // predecessor, since if we've gotten here it's got > minItems items in it.
- out := n.items[i]
- // We use our special-case 'remove' call with typ=removeMax to pull the
- // predecessor of item i (the rightmost leaf of our immediate left child)
- // and set it into where we pulled the item from.
- var zero T
- n.items[i], _ = child.remove(zero, minItems, removeMax)
- return out, true
- }
- // Final recursive call. Once we're here, we know that the item isn't in this
- // node and that the child is big enough to remove from.
- return child.remove(item, minItems, typ)
-}
-
-// growChildAndRemove grows child 'i' to make sure it's possible to remove an
-// item from it while keeping it at minItems, then calls remove to actually
-// remove it.
-//
-// Most documentation says we have to do two sets of special casing:
-// 1) item is in this node
-// 2) item is in child
-// In both cases, we need to handle the two subcases:
-// A) node has enough values that it can spare one
-// B) node doesn't have enough values
-// For the latter, we have to check:
-// a) left sibling has node to spare
-// b) right sibling has node to spare
-// c) we must merge
-// To simplify our code here, we handle cases #1 and #2 the same:
-// If a node doesn't have enough items, we make sure it does (using a,b,c).
-// We then simply redo our remove call, and the second time (regardless of
-// whether we're in case 1 or 2), we'll have enough items and can guarantee
-// that we hit case A.
-func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
- if i > 0 && len(n.children[i-1].items) > minItems {
- // Steal from left child
- child := n.mutableChild(i)
- stealFrom := n.mutableChild(i - 1)
- stolenItem := stealFrom.items.pop()
- child.items.insertAt(0, n.items[i-1])
- n.items[i-1] = stolenItem
- if len(stealFrom.children) > 0 {
- child.children.insertAt(0, stealFrom.children.pop())
- }
- } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
- // steal from right child
- child := n.mutableChild(i)
- stealFrom := n.mutableChild(i + 1)
- stolenItem := stealFrom.items.removeAt(0)
- child.items = append(child.items, n.items[i])
- n.items[i] = stolenItem
- if len(stealFrom.children) > 0 {
- child.children = append(child.children, stealFrom.children.removeAt(0))
- }
- } else {
- if i >= len(n.items) {
- i--
- }
- child := n.mutableChild(i)
- // merge with right child
- mergeItem := n.items.removeAt(i)
- mergeChild := n.children.removeAt(i + 1)
- child.items = append(child.items, mergeItem)
- child.items = append(child.items, mergeChild.items...)
- child.children = append(child.children, mergeChild.children...)
- n.cow.freeNode(mergeChild)
- }
- return n.remove(item, minItems, typ)
-}
-
-type direction int
-
-const (
- descend = direction(-1)
- ascend = direction(+1)
-)
-
-type optionalItem[T any] struct {
- item T
- valid bool
-}
-
-func optional[T any](item T) optionalItem[T] {
- return optionalItem[T]{item: item, valid: true}
-}
-func empty[T any]() optionalItem[T] {
- return optionalItem[T]{}
-}
-
-// iterate provides a simple method for iterating over elements in the tree.
-//
-// When ascending, the 'start' should be less than 'stop' and when descending,
-// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
-// will force the iterator to include the first item when it equals 'start',
-// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
-// "greaterThan" or "lessThan" query.
-func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) {
- var ok, found bool
- var index int
- switch dir {
- case ascend:
- if start.valid {
- index, _ = n.items.find(start.item, n.cow.less)
- }
- for i := index; i < len(n.items); i++ {
- if len(n.children) > 0 {
- if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) {
- hit = true
- continue
- }
- hit = true
- if stop.valid && !n.cow.less(n.items[i], stop.item) {
- return hit, false
- }
- if !iter(n.items[i]) {
- return hit, false
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- case descend:
- if start.valid {
- index, found = n.items.find(start.item, n.cow.less)
- if !found {
- index = index - 1
- }
- } else {
- index = len(n.items) - 1
- }
- for i := index; i >= 0; i-- {
- if start.valid && !n.cow.less(n.items[i], start.item) {
- if !includeStart || hit || n.cow.less(start.item, n.items[i]) {
- continue
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- if stop.valid && !n.cow.less(stop.item, n.items[i]) {
- return hit, false // continue
- }
- hit = true
- if !iter(n.items[i]) {
- return hit, false
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- }
- return hit, true
-}
-
-// print is used for testing/debugging purposes.
-func (n *node[T]) print(w io.Writer, level int) {
- fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
- for _, c := range n.children {
- c.print(w, level+1)
- }
-}
-
-// BTreeG is a generic implementation of a B-Tree.
-//
-// BTreeG stores items of type T in an ordered structure, allowing easy insertion,
-// removal, and iteration.
-//
-// Write operations are not safe for concurrent mutation by multiple
-// goroutines, but Read operations are.
-type BTreeG[T any] struct {
- degree int
- length int
- root *node[T]
- cow *copyOnWriteContext[T]
-}
-
-// LessFunc[T] determines how to order a type 'T'. It should implement a strict
-// ordering, and should return true if within that ordering, 'a' < 'b'.
-type LessFunc[T any] func(a, b T) bool
-
-// copyOnWriteContext pointers determine node ownership... a tree with a write
-// context equivalent to a node's write context is allowed to modify that node.
-// A tree whose write context does not match a node's is not allowed to modify
-// it, and must create a new, writable copy (IE: it's a Clone).
-//
-// When doing any write operation, we maintain the invariant that the current
-// node's context is equal to the context of the tree that requested the write.
-// We do this by, before we descend into any node, creating a copy with the
-// correct context if the contexts don't match.
-//
-// Since the node we're currently visiting on any write has the requesting
-// tree's context, that node is modifiable in place. Children of that node may
-// not share context, but before we descend into them, we'll make a mutable
-// copy.
-type copyOnWriteContext[T any] struct {
- freelist *FreeListG[T]
- less LessFunc[T]
-}
-
-// Clone clones the btree, lazily. Clone should not be called concurrently,
-// but the original tree (t) and the new tree (t2) can be used concurrently
-// once the Clone call completes.
-//
-// The internal tree structure of t is marked read-only and shared between t and
-// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
-// whenever one of t's original nodes would have been modified. Read operations
-// should have no performance degradation. Write operations for both t and t2
-// will initially experience minor slow-downs caused by additional allocs and
-// copies due to the aforementioned copy-on-write logic, but should converge to
-// the performance characteristics of the original tree.
-func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
- // Create two entirely new copy-on-write contexts.
- // This operation effectively creates three trees:
- // the original, shared nodes (old b.cow)
- // the new b.cow nodes
- // the new out.cow nodes
- cow1, cow2 := *t.cow, *t.cow
- out := *t
- t.cow = &cow1
- out.cow = &cow2
- return &out
-}
-
-// maxItems returns the max number of items to allow per node.
-func (t *BTreeG[T]) maxItems() int {
- return t.degree*2 - 1
-}
-
-// minItems returns the min number of items to allow per node (ignored for the
-// root node).
-func (t *BTreeG[T]) minItems() int {
- return t.degree - 1
-}
-
-func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
- n = c.freelist.newNode()
- n.cow = c
- return
-}
-
-type freeType int
-
-const (
- ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
- ftStored // node was stored in the freelist for later use
- ftNotOwned // node was ignored by COW, since it's owned by another one
-)
-
-// freeNode frees a node within a given COW context, if it's owned by that
-// context. It returns what happened to the node (see freeType const
-// documentation).
-func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
- if n.cow == c {
- // clear to allow GC
- n.items.truncate(0)
- n.children.truncate(0)
- n.cow = nil
- if c.freelist.freeNode(n) {
- return ftStored
- } else {
- return ftFreelistFull
- }
- } else {
- return ftNotOwned
- }
-}
-
-// ReplaceOrInsert adds the given item to the tree. If an item in the tree
-// already equals the given one, it is removed from the tree and returned,
-// and the second return value is true. Otherwise, (zeroValue, false) is returned.
-//
-// nil cannot be added to the tree (will panic).
-func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
- if t.root == nil {
- t.root = t.cow.newNode()
- t.root.items = append(t.root.items, item)
- t.length++
- return
- } else {
- t.root = t.root.mutableFor(t.cow)
- if len(t.root.items) >= t.maxItems() {
- item2, second := t.root.split(t.maxItems() / 2)
- oldroot := t.root
- t.root = t.cow.newNode()
- t.root.items = append(t.root.items, item2)
- t.root.children = append(t.root.children, oldroot, second)
- }
- }
- out, outb := t.root.insert(item, t.maxItems())
- if !outb {
- t.length++
- }
- return out, outb
-}
-
-// Delete removes an item equal to the passed in item from the tree, returning
-// it. If no such item exists, returns (zeroValue, false).
-func (t *BTreeG[T]) Delete(item T) (T, bool) {
- return t.deleteItem(item, removeItem)
-}
-
-// DeleteMin removes the smallest item in the tree and returns it.
-// If no such item exists, returns (zeroValue, false).
-func (t *BTreeG[T]) DeleteMin() (T, bool) {
- var zero T
- return t.deleteItem(zero, removeMin)
-}
-
-// DeleteMax removes the largest item in the tree and returns it.
-// If no such item exists, returns (zeroValue, false).
-func (t *BTreeG[T]) DeleteMax() (T, bool) {
- var zero T
- return t.deleteItem(zero, removeMax)
-}
-
-func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) {
- if t.root == nil || len(t.root.items) == 0 {
- return
- }
- t.root = t.root.mutableFor(t.cow)
- out, outb := t.root.remove(item, t.minItems(), typ)
- if len(t.root.items) == 0 && len(t.root.children) > 0 {
- oldroot := t.root
- t.root = t.root.children[0]
- t.cow.freeNode(oldroot)
- }
- if outb {
- t.length--
- }
- return out, outb
-}
-
-// AscendRange calls the iterator for every value in the tree within the range
-// [greaterOrEqual, lessThan), until iterator returns false.
-func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator)
-}
-
-// AscendLessThan calls the iterator for every value in the tree within the range
-// [first, pivot), until iterator returns false.
-func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator)
-}
-
-// AscendGreaterOrEqual calls the iterator for every value in the tree within
-// the range [pivot, last], until iterator returns false.
-func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator)
-}
-
-// Ascend calls the iterator for every value in the tree within the range
-// [first, last], until iterator returns false.
-func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator)
-}
-
-// DescendRange calls the iterator for every value in the tree within the range
-// [lessOrEqual, greaterThan), until iterator returns false.
-func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator)
-}
-
-// DescendLessOrEqual calls the iterator for every value in the tree within the range
-// [pivot, first], until iterator returns false.
-func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator)
-}
-
-// DescendGreaterThan calls the iterator for every value in the tree within
-// the range [last, pivot), until iterator returns false.
-func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator)
-}
-
-// Descend calls the iterator for every value in the tree within the range
-// [last, first], until iterator returns false.
-func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator)
-}
-
-// Get looks for the key item in the tree, returning it. It returns
-// (zeroValue, false) if unable to find that item.
-func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
- if t.root == nil {
- return
- }
- return t.root.get(key)
-}
-
-// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
-func (t *BTreeG[T]) Min() (_ T, _ bool) {
- return min(t.root)
-}
-
-// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
-func (t *BTreeG[T]) Max() (_ T, _ bool) {
- return max(t.root)
-}
-
-// Has returns true if the given key is in the tree.
-func (t *BTreeG[T]) Has(key T) bool {
- _, ok := t.Get(key)
- return ok
-}
-
-// Len returns the number of items currently in the tree.
-func (t *BTreeG[T]) Len() int {
- return t.length
-}
-
-// Clear removes all items from the btree. If addNodesToFreelist is true,
-// t's nodes are added to its freelist as part of this call, until the freelist
-// is full. Otherwise, the root node is simply dereferenced and the subtree
-// left to Go's normal GC processes.
-//
-// This can be much faster
-// than calling Delete on all elements, because that requires finding/removing
-// each element in the tree and updating the tree accordingly. It also is
-// somewhat faster than creating a new tree to replace the old one, because
-// nodes from the old tree are reclaimed into the freelist for use by the new
-// one, instead of being lost to the garbage collector.
-//
-// This call takes:
-// O(1): when addNodesToFreelist is false, this is a single operation.
-// O(1): when the freelist is already full, it breaks out immediately
-// O(freelist size): when the freelist is empty and the nodes are all owned
-// by this tree, nodes are added to the freelist until full.
-// O(tree size): when all nodes are owned by another tree, all nodes are
-// iterated over looking for nodes to add to the freelist, and due to
-// ownership, none are.
-func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
- if t.root != nil && addNodesToFreelist {
- t.root.reset(t.cow)
- }
- t.root, t.length = nil, 0
-}
-
-// reset returns a subtree to the freelist. It breaks out immediately if the
-// freelist is full, since the only benefit of iterating is to fill that
-// freelist up. Returns true if parent reset call should continue.
-func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
- for _, child := range n.children {
- if !child.reset(c) {
- return false
- }
- }
- return c.freeNode(n) != ftFreelistFull
-}
-
-// Int implements the Item interface for integers.
-type Int int
-
-// Less returns true if int(a) < int(b).
-func (a Int) Less(b Item) bool {
- return a < b.(Int)
-}
-
-// BTree is an implementation of a B-Tree.
-//
-// BTree stores Item instances in an ordered structure, allowing easy insertion,
-// removal, and iteration.
-//
-// Write operations are not safe for concurrent mutation by multiple
-// goroutines, but Read operations are.
-type BTree BTreeG[Item]
-
-var itemLess LessFunc[Item] = func(a, b Item) bool {
- return a.Less(b)
-}
-
-// New creates a new B-Tree with the given degree.
-//
-// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
-// and 2-4 children).
-func New(degree int) *BTree {
- return (*BTree)(NewG[Item](degree, itemLess))
-}
-
-// FreeList represents a free list of btree nodes. By default each
-// BTree has its own FreeList, but multiple BTrees can share the same
-// FreeList.
-// Two BTrees using the same freelist are safe for concurrent write access.
-type FreeList FreeListG[Item]
-
-// NewFreeList creates a new free list.
-// size is the maximum size of the returned free list.
-func NewFreeList(size int) *FreeList {
- return (*FreeList)(NewFreeListG[Item](size))
-}
-
-// NewWithFreeList creates a new B-Tree that uses the given node free list.
-func NewWithFreeList(degree int, f *FreeList) *BTree {
- return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
-}
-
-// ItemIterator allows callers of Ascend* to iterate in-order over portions of
-// the tree. When this function returns false, iteration will stop and the
-// associated Ascend* function will immediately return.
-type ItemIterator ItemIteratorG[Item]
-
-// Clone clones the btree, lazily. Clone should not be called concurrently,
-// but the original tree (t) and the new tree (t2) can be used concurrently
-// once the Clone call completes.
-//
-// The internal tree structure of t is marked read-only and shared between t and
-// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
-// whenever one of t's original nodes would have been modified. Read operations
-// should have no performance degradation. Write operations for both t and t2
-// will initially experience minor slow-downs caused by additional allocs and
-// copies due to the aforementioned copy-on-write logic, but should converge to
-// the performance characteristics of the original tree.
-func (t *BTree) Clone() (t2 *BTree) {
- return (*BTree)((*BTreeG[Item])(t).Clone())
-}
-
-// Delete removes an item equal to the passed in item from the tree, returning
-// it. If no such item exists, returns nil.
-func (t *BTree) Delete(item Item) Item {
- i, _ := (*BTreeG[Item])(t).Delete(item)
- return i
-}
-
-// DeleteMax removes the largest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMax() Item {
- i, _ := (*BTreeG[Item])(t).DeleteMax()
- return i
-}
-
-// DeleteMin removes the smallest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMin() Item {
- i, _ := (*BTreeG[Item])(t).DeleteMin()
- return i
-}
-
-// Get looks for the key item in the tree, returning it. It returns nil if
-// unable to find that item.
-func (t *BTree) Get(key Item) Item {
- i, _ := (*BTreeG[Item])(t).Get(key)
- return i
-}
-
-// Max returns the largest item in the tree, or nil if the tree is empty.
-func (t *BTree) Max() Item {
- i, _ := (*BTreeG[Item])(t).Max()
- return i
-}
-
-// Min returns the smallest item in the tree, or nil if the tree is empty.
-func (t *BTree) Min() Item {
- i, _ := (*BTreeG[Item])(t).Min()
- return i
-}
-
-// Has returns true if the given key is in the tree.
-func (t *BTree) Has(key Item) bool {
- return (*BTreeG[Item])(t).Has(key)
-}
-
-// ReplaceOrInsert adds the given item to the tree. If an item in the tree
-// already equals the given one, it is removed from the tree and returned.
-// Otherwise, nil is returned.
-//
-// nil cannot be added to the tree (will panic).
-func (t *BTree) ReplaceOrInsert(item Item) Item {
- i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
- return i
-}
-
-// AscendRange calls the iterator for every value in the tree within the range
-// [greaterOrEqual, lessThan), until iterator returns false.
-func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
-}
-
-// AscendLessThan calls the iterator for every value in the tree within the range
-// [first, pivot), until iterator returns false.
-func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator))
-}
-
-// AscendGreaterOrEqual calls the iterator for every value in the tree within
-// the range [pivot, last], until iterator returns false.
-func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator))
-}
-
-// Ascend calls the iterator for every value in the tree within the range
-// [first, last], until iterator returns false.
-func (t *BTree) Ascend(iterator ItemIterator) {
- (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator))
-}
-
-// DescendRange calls the iterator for every value in the tree within the range
-// [lessOrEqual, greaterThan), until iterator returns false.
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator))
-}
-
-// DescendLessOrEqual calls the iterator for every value in the tree within the range
-// [pivot, first], until iterator returns false.
-func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator))
-}
-
-// DescendGreaterThan calls the iterator for every value in the tree within
-// the range [last, pivot), until iterator returns false.
-func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
- (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator))
-}
-
-// Descend calls the iterator for every value in the tree within the range
-// [last, first], until iterator returns false.
-func (t *BTree) Descend(iterator ItemIterator) {
- (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator))
-}
-
-// Len returns the number of items currently in the tree.
-func (t *BTree) Len() int {
- return (*BTreeG[Item])(t).Len()
-}
-
-// Clear removes all items from the btree. If addNodesToFreelist is true,
-// t's nodes are added to its freelist as part of this call, until the freelist
-// is full. Otherwise, the root node is simply dereferenced and the subtree
-// left to Go's normal GC processes.
-//
-// This can be much faster
-// than calling Delete on all elements, because that requires finding/removing
-// each element in the tree and updating the tree accordingly. It also is
-// somewhat faster than creating a new tree to replace the old one, because
-// nodes from the old tree are reclaimed into the freelist for use by the new
-// one, instead of being lost to the garbage collector.
-//
-// This call takes:
-// O(1): when addNodesToFreelist is false, this is a single operation.
-// O(1): when the freelist is already full, it breaks out immediately
-// O(freelist size): when the freelist is empty and the nodes are all owned
-// by this tree, nodes are added to the freelist until full.
-// O(tree size): when all nodes are owned by another tree, all nodes are
-// iterated over looking for nodes to add to the freelist, and due to
-// ownership, none are.
-func (t *BTree) Clear(addNodesToFreelist bool) {
- (*BTreeG[Item])(t).Clear(addNodesToFreelist)
-}
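For reference, a minimal usage sketch of the generic B-Tree API shown in the removed vendored file above. The degree of 32, the int ordering, and the freelist size are illustrative assumptions rather than values taken from this repository, and the import path assumes the upstream google/btree module:

package main

import (
	"fmt"

	"github.com/google/btree" // assumed import path for the vendored B-Tree package
)

func main() {
	less := func(a, b int) bool { return a < b }

	// NewG builds a generic B-Tree with the given degree and LessFunc ordering.
	tr := btree.NewG[int](32, less)

	// ReplaceOrInsert returns the previous equivalent item (if any) and true.
	for _, v := range []int{5, 1, 3, 2, 4} {
		tr.ReplaceOrInsert(v)
	}

	if got, ok := tr.Get(3); ok {
		fmt.Println("found", got) // found 3
	}

	// Ascend visits items in order until the iterator returns false.
	tr.Ascend(func(item int) bool {
		fmt.Println(item)
		return true
	})

	if old, ok := tr.Delete(1); ok {
		fmt.Println("deleted", old, "remaining", tr.Len())
	}

	// Multiple trees may share one freelist; per the docs above, two trees
	// using the same freelist are safe for concurrent write access.
	fl := btree.NewFreeListG[int](64)
	tr2 := btree.NewWithFreeListG[int](32, less, fl)
	_ = tr2
}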
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
index f232f30da..fd3f73505 100644
--- a/vendor/github.com/google/cel-go/checker/cost.go
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -520,6 +520,9 @@ func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate {
c.iterRanges.pop(comp.GetIterVar())
sum = sum.Add(c.cost(comp.Result))
rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange()))
+
+ c.computedSizes[e.GetId()] = rangeCnt
+
rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
sum = sum.Add(rangeCost)
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
index c8a1beb8a..860bb304c 100644
--- a/vendor/github.com/google/pprof/profile/encode.go
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -258,10 +258,10 @@ func (p *Profile) postDecode() error {
// If this a main linux kernel mapping with a relocation symbol suffix
// ("[kernel.kallsyms]_text"), extract said suffix.
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
- if strings.HasPrefix(m.File, "[kernel.kallsyms]") {
- m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "")
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
}
-
}
functions := make(map[uint64]*Function, len(p.Function))
@@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder {
func (p *Line) encode(b *buffer) {
encodeUint64Opt(b, 1, p.functionIDX)
encodeInt64Opt(b, 2, p.Line)
+ encodeInt64Opt(b, 3, p.Column)
}
var lineDecoder = []decoder{
@@ -538,6 +539,8 @@ var lineDecoder = []decoder{
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
// optional int64 line = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+ // optional int64 column = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
}
func (p *Function) decoder() []decoder {
diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
index 91f45e53c..4580bab18 100644
--- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go
+++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte
}
// Strip out addresses for better merge.
- if err = p.Aggregate(true, true, true, true, false); err != nil {
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err
}
@@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) {
}
// Strip out addresses for better merge.
- if err = p.Aggregate(true, true, true, true, false); err != nil {
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err
}
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
index 4b66282cb..eee0132e7 100644
--- a/vendor/github.com/google/pprof/profile/merge.go
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -326,12 +326,13 @@ func (l *Location) key() locationKey {
key.addr -= l.Mapping.Start
key.mappingID = l.Mapping.ID
}
- lines := make([]string, len(l.Line)*2)
+ lines := make([]string, len(l.Line)*3)
for i, line := range l.Line {
if line.Function != nil {
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
}
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
+ lines[i*2+2] = strconv.FormatInt(line.Column, 16)
}
key.lines = strings.Join(lines, "|")
return key
@@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line {
ln := Line{
Function: pm.mapFunction(src.Function),
Line: src.Line,
+ Column: src.Column,
}
return ln
}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
index 4ec00fe7d..62df80a55 100644
--- a/vendor/github.com/google/pprof/profile/profile.go
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -72,9 +72,23 @@ type ValueType struct {
type Sample struct {
Location []*Location
Value []int64
- Label map[string][]string
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
NumLabel map[string][]int64
- NumUnit map[string][]string
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel; see the docs in profile.proto for details. When the value
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
locationIDX []uint64
labelX []label
@@ -131,6 +145,7 @@ type Location struct {
type Line struct {
Function *Function
Line int64
+ Column int64
functionIDX uint64
}
@@ -422,7 +437,7 @@ func (p *Profile) CheckValid() error {
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
for _, m := range p.Mapping {
m.HasInlineFrames = m.HasInlineFrames && inlineFrame
m.HasFunctions = m.HasFunctions && function
@@ -444,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
}
// Aggregate locations
- if !inlineFrame || !address || !linenumber {
+ if !inlineFrame || !address || !linenumber || !columnnumber {
for _, l := range p.Location {
if !inlineFrame && len(l.Line) > 1 {
l.Line = l.Line[len(l.Line)-1:]
@@ -452,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
if !linenumber {
for i := range l.Line {
l.Line[i].Line = 0
+ l.Line[i].Column = 0
+ }
+ }
+ if !columnnumber {
+ for i := range l.Line {
+ l.Line[i].Column = 0
}
}
if !address {
@@ -613,10 +634,11 @@ func (l *Location) string() string {
for li := range l.Line {
lnStr := "??"
if fn := l.Line[li].Function; fn != nil {
- lnStr = fmt.Sprintf("%s %s:%d s=%d",
+ lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
fn.Name,
fn.Filename,
l.Line[li].Line,
+ l.Line[li].Column,
fn.StartLine)
if fn.Name != fn.SystemName {
lnStr = lnStr + "(" + fn.SystemName + ")"
@@ -715,6 +737,35 @@ func (s *Sample) HasLabel(key, value string) bool {
return false
}
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
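A hedged sketch of how the numeric-label helpers and the widened Aggregate signature introduced above might be called; the file name and the "allocation"/"bytes" label are illustrative assumptions, not values from this repository:

package main

import (
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	f, err := os.Open("cpu.pb.gz") // illustrative profile path, not from this repo
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a numeric label and its unit to every sample, then drop it again.
	p.SetNumLabel("allocation", []int64{4096}, []string{"bytes"})
	p.RemoveNumLabel("allocation")

	// Aggregate now takes a columnnumber flag between linenumber and address.
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		log.Fatal(err)
	}
}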
diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go
deleted file mode 100644
index c9bc03244..000000000
--- a/vendor/github.com/moby/term/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package term provides structures and helper functions to work with
-// terminal (state, sizes).
-package term
diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go
new file mode 100644
index 000000000..8a5e09f58
--- /dev/null
+++ b/vendor/github.com/moby/term/tc.go
@@ -0,0 +1,20 @@
+//go:build !windows
+// +build !windows
+
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr) (*Termios, error) {
+ p, err := unix.IoctlGetTermios(int(fd), getTermios)
+ if err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func tcset(fd uintptr, p *Termios) error {
+ return unix.IoctlSetTermios(int(fd), setTermios, p)
+}
diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go
index f9d8988ef..2dd3d090d 100644
--- a/vendor/github.com/moby/term/term.go
+++ b/vendor/github.com/moby/term/term.go
@@ -1,85 +1,100 @@
+//go:build !windows
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// terminal (state, sizes).
package term
-import "io"
+import (
+ "errors"
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// ErrInvalidState is returned if the state of the terminal is invalid.
+var ErrInvalidState = errors.New("Invalid terminal state")
-// State holds the platform-specific state / console mode for the terminal.
-type State terminalState
+// State represents the state of the terminal.
+type State struct {
+ termios Termios
+}
// Winsize represents the size of the terminal window.
type Winsize struct {
Height uint16
Width uint16
-
- // Only used on Unix
- x uint16
- y uint16
+ x uint16
+ y uint16
}
// StdStreams returns the standard streams (stdin, stdout, stderr).
-//
-// On Windows, it attempts to turn on VT handling on all std handles if
-// supported, or falls back to terminal emulation. On Unix, this returns
-// the standard [os.Stdin], [os.Stdout] and [os.Stderr].
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
- return stdStreams()
+ return os.Stdin, os.Stdout, os.Stderr
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) {
- return getFdInfo(in)
-}
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
- return getWinsize(fd)
-}
-
-// SetWinsize tries to set the specified window size for the specified file
-// descriptor. It is only implemented on Unix, and returns an error on Windows.
-func SetWinsize(fd uintptr, ws *Winsize) error {
- return setWinsize(fd, ws)
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ var inFd uintptr
+ var isTerminalIn bool
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminalIn = IsTerminal(inFd)
+ }
+ return inFd, isTerminalIn
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
- return isTerminal(fd)
+ _, err := tcget(fd)
+ return err == nil
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
- return restoreTerminal(fd, state)
+ if state == nil {
+ return ErrInvalidState
+ }
+ return tcset(fd, &state.termios)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
- return saveState(fd)
+ termios, err := tcget(fd)
+ if err != nil {
+ return nil, err
+ }
+ return &State{termios: *termios}, nil
}
// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
- return disableEcho(fd, state)
+ newState := state.termios
+ newState.Lflag &^= unix.ECHO
+
+ if err := tcset(fd, &newState); err != nil {
+ return err
+ }
+ return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state. On UNIX, this is the equivalent of
-// [MakeRaw], and puts both the input and output into raw mode. On Windows, it
-// only puts the input into raw mode.
-func SetRawTerminal(fd uintptr) (previousState *State, err error) {
- return setRawTerminal(fd)
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ oldState, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+ return oldState, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
-func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) {
- return setRawTerminalOutput(fd)
-}
-
-// MakeRaw puts the terminal (Windows Console) connected to the
-// given file descriptor into raw mode and returns the previous state of
-// the terminal so that it can be restored.
-func MakeRaw(fd uintptr) (previousState *State, err error) {
- return makeRaw(fd)
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ return nil, nil
}
diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go
deleted file mode 100644
index 2ec7706a1..000000000
--- a/vendor/github.com/moby/term/term_unix.go
+++ /dev/null
@@ -1,98 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package term
-
-import (
- "errors"
- "io"
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// ErrInvalidState is returned if the state of the terminal is invalid.
-//
-// Deprecated: ErrInvalidState is no longer used.
-var ErrInvalidState = errors.New("Invalid terminal state")
-
-// terminalState holds the platform-specific state / console mode for the terminal.
-type terminalState struct {
- termios unix.Termios
-}
-
-func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
- return os.Stdin, os.Stdout, os.Stderr
-}
-
-func getFdInfo(in interface{}) (uintptr, bool) {
- var inFd uintptr
- var isTerminalIn bool
- if file, ok := in.(*os.File); ok {
- inFd = file.Fd()
- isTerminalIn = isTerminal(inFd)
- }
- return inFd, isTerminalIn
-}
-
-func getWinsize(fd uintptr) (*Winsize, error) {
- uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
- ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
- return ws, err
-}
-
-func setWinsize(fd uintptr, ws *Winsize) error {
- return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{
- Row: ws.Height,
- Col: ws.Width,
- Xpixel: ws.x,
- Ypixel: ws.y,
- })
-}
-
-func isTerminal(fd uintptr) bool {
- _, err := tcget(fd)
- return err == nil
-}
-
-func restoreTerminal(fd uintptr, state *State) error {
- if state == nil {
- return errors.New("invalid terminal state")
- }
- return tcset(fd, &state.termios)
-}
-
-func saveState(fd uintptr) (*State, error) {
- termios, err := tcget(fd)
- if err != nil {
- return nil, err
- }
- return &State{termios: *termios}, nil
-}
-
-func disableEcho(fd uintptr, state *State) error {
- newState := state.termios
- newState.Lflag &^= unix.ECHO
-
- return tcset(fd, &newState)
-}
-
-func setRawTerminal(fd uintptr) (*State, error) {
- return makeRaw(fd)
-}
-
-func setRawTerminalOutput(fd uintptr) (*State, error) {
- return nil, nil
-}
-
-func tcget(fd uintptr) (*unix.Termios, error) {
- p, err := unix.IoctlGetTermios(int(fd), getTermios)
- if err != nil {
- return nil, err
- }
- return p, nil
-}
-
-func tcset(fd uintptr, p *unix.Termios) error {
- return unix.IoctlSetTermios(int(fd), setTermios, p)
-}
diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go
index 81ccff042..3cdc8edbd 100644
--- a/vendor/github.com/moby/term/term_windows.go
+++ b/vendor/github.com/moby/term/term_windows.go
@@ -1,7 +1,6 @@
package term
import (
- "fmt"
"io"
"os"
"os/signal"
@@ -10,15 +9,22 @@ import (
"golang.org/x/sys/windows"
)
-// terminalState holds the platform-specific state / console mode for the terminal.
-type terminalState struct {
+// State holds the console mode for the terminal.
+type State struct {
mode uint32
}
+// Winsize is used for window size.
+type Winsize struct {
+ Height uint16
+ Width uint16
+}
+
// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool
-func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
var (
@@ -81,14 +87,16 @@ func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
stdErr = os.Stderr
}
- return stdIn, stdOut, stdErr
+ return
}
-func getFdInfo(in interface{}) (uintptr, bool) {
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
return windowsconsole.GetHandleInfo(in)
}
-func getWinsize(fd uintptr) (*Winsize, error) {
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
var info windows.ConsoleScreenBufferInfo
if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
return nil, err
@@ -102,21 +110,21 @@ func getWinsize(fd uintptr) (*Winsize, error) {
return winsize, nil
}
-func setWinsize(fd uintptr, ws *Winsize) error {
- return fmt.Errorf("not implemented on Windows")
-}
-
-func isTerminal(fd uintptr) bool {
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
var mode uint32
err := windows.GetConsoleMode(windows.Handle(fd), &mode)
return err == nil
}
-func restoreTerminal(fd uintptr, state *State) error {
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
return windows.SetConsoleMode(windows.Handle(fd), state.mode)
}
-func saveState(fd uintptr) (*State, error) {
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
var mode uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil {
@@ -126,8 +134,9 @@ func saveState(fd uintptr) (*State, error) {
return &State{mode: mode}, nil
}
-func disableEcho(fd uintptr, state *State) error {
- // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
mode := state.mode
mode &^= windows.ENABLE_ECHO_INPUT
mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT
@@ -141,27 +150,69 @@ func disableEcho(fd uintptr, state *State) error {
return nil
}
-func setRawTerminal(fd uintptr) (*State, error) {
- oldState, err := MakeRaw(fd)
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ state, err := MakeRaw(fd)
if err != nil {
return nil, err
}
// Register an interrupt handler to catch and restore prior state
- restoreAtInterrupt(fd, oldState)
- return oldState, err
+ restoreAtInterrupt(fd, state)
+ return state, err
}
-func setRawTerminalOutput(fd uintptr) (*State, error) {
- oldState, err := saveState(fd)
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
if err != nil {
return nil, err
}
// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
// version of Windows.
- _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN)
- return oldState, err
+ _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+ return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ mode := state.mode
+
+ // See
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+ // Disable these modes
+ mode &^= windows.ENABLE_ECHO_INPUT
+ mode &^= windows.ENABLE_LINE_INPUT
+ mode &^= windows.ENABLE_MOUSE_INPUT
+ mode &^= windows.ENABLE_WINDOW_INPUT
+ mode &^= windows.ENABLE_PROCESSED_INPUT
+
+ // Enable these modes
+ mode |= windows.ENABLE_EXTENDED_FLAGS
+ mode |= windows.ENABLE_INSERT_MODE
+ mode |= windows.ENABLE_QUICK_EDIT_MODE
+ if vtInputSupported {
+ mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
+ }
+
+ err = windows.SetConsoleMode(windows.Handle(fd), mode)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
}
func restoreAtInterrupt(fd uintptr, state *State) {
diff --git a/vendor/github.com/moby/term/termios_unix.go b/vendor/github.com/moby/term/termios.go
similarity index 50%
rename from vendor/github.com/moby/term/termios_unix.go
rename to vendor/github.com/moby/term/termios.go
index 60c823783..99c0f7de6 100644
--- a/vendor/github.com/moby/term/termios_unix.go
+++ b/vendor/github.com/moby/term/termios.go
@@ -8,11 +8,12 @@ import (
)
// Termios is the Unix API for terminal I/O.
-//
-// Deprecated: use [unix.Termios].
type Termios = unix.Termios
-func makeRaw(fd uintptr) (*State, error) {
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
termios, err := tcget(fd)
if err != nil {
return nil, err
@@ -20,10 +21,10 @@ func makeRaw(fd uintptr) (*State, error) {
oldState := State{termios: *termios}
- termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+ termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
termios.Oflag &^= unix.OPOST
- termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
- termios.Cflag &^= unix.CSIZE | unix.PARENB
+ termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+ termios.Cflag &^= (unix.CSIZE | unix.PARENB)
termios.Cflag |= unix.CS8
termios.Cc[unix.VMIN] = 1
termios.Cc[unix.VTIME] = 0
diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go
deleted file mode 100644
index 5be4e7601..000000000
--- a/vendor/github.com/moby/term/termios_windows.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package term
-
-import "golang.org/x/sys/windows"
-
-func makeRaw(fd uintptr) (*State, error) {
- state, err := SaveState(fd)
- if err != nil {
- return nil, err
- }
-
- mode := state.mode
-
- // See
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
- // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
-
- // Disable these modes
- mode &^= windows.ENABLE_ECHO_INPUT
- mode &^= windows.ENABLE_LINE_INPUT
- mode &^= windows.ENABLE_MOUSE_INPUT
- mode &^= windows.ENABLE_WINDOW_INPUT
- mode &^= windows.ENABLE_PROCESSED_INPUT
-
- // Enable these modes
- mode |= windows.ENABLE_EXTENDED_FLAGS
- mode |= windows.ENABLE_INSERT_MODE
- mode |= windows.ENABLE_QUICK_EDIT_MODE
- if vtInputSupported {
- mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
- }
-
- err = windows.SetConsoleMode(windows.Handle(fd), mode)
- if err != nil {
- return nil, err
- }
- return state, nil
-}
diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go
index fb34c547a..f32aa537e 100644
--- a/vendor/github.com/moby/term/windows/ansi_reader.go
+++ b/vendor/github.com/moby/term/windows/ansi_reader.go
@@ -195,10 +195,10 @@ func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) stri
// +Key generates ESC N Key
if !control && alt {
- return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar)))
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
}
- return string(rune(keyEvent.UnicodeChar))
+ return string(keyEvent.UnicodeChar)
}
// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go
index 21e57bd52..116b74e8f 100644
--- a/vendor/github.com/moby/term/windows/console.go
+++ b/vendor/github.com/moby/term/windows/console.go
@@ -30,11 +30,8 @@ func GetHandleInfo(in interface{}) (uintptr, bool) {
// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
-//
-// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal].
-func IsConsole(fd uintptr) bool {
- return isConsole(fd)
-}
+// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal()
+var IsConsole = isConsole
func isConsole(fd uintptr) bool {
var mode uint32
diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go
new file mode 100644
index 000000000..bea8d4595
--- /dev/null
+++ b/vendor/github.com/moby/term/winsize.go
@@ -0,0 +1,21 @@
+//go:build !windows
+// +build !windows
+
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+ ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
+ return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
+}
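A small, hedged illustration of the window-size helpers added in winsize.go above; it assumes stdout is attached to a terminal and simply round-trips the reported size:

package main

import (
	"fmt"
	"os"

	"github.com/moby/term"
)

func main() {
	fd := os.Stdout.Fd()

	ws, err := term.GetWinsize(fd)
	if err != nil {
		fmt.Println("not a terminal:", err)
		return
	}
	fmt.Printf("current size: %d columns x %d rows\n", ws.Width, ws.Height)

	// Writing the same size back is effectively a no-op, but it exercises the
	// TIOCSWINSZ path used by SetWinsize.
	if err := term.SetWinsize(fd, ws); err != nil {
		fmt.Println("SetWinsize failed:", err)
	}
}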
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index 9a65dd10c..76577dc78 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,83 @@
+## 2.19.0
+
+### Features
+
+[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering.
+
+## 2.18.0
+
+### Features
+- Add --silence-skips and --force-newlines [f010b65]
+- fail when no tests were run and --fail-on-empty was set [d80eebe]
+
+### Fixes
+- Fix table entry context edge case [42013d6]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7]
+- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7]
+
+## 2.17.3
+
+### Fixes
+`ginkgo watch` now ignores hidden files [bde6e00]
+
+## 2.17.2
+
+### Fixes
+- fix: close files [32259c8]
+- fix github output log level for skipped specs [780e7a3]
+
+### Maintenance
+- Bump github.com/google/pprof [d91fe4e]
+- Bump github.com/go-task/slim-sprig to v3 [8cb662e]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422]
+- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4]
+- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8]
+- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4]
+- Fix test for gomega version bump [f2fcd97]
+- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2]
+- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26]
+- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170]
+- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a]
+
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
## 2.15.0
### Features
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
index 1da92fe7e..80de566a5 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
- Ensure adequate test coverage:
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
-- Make sure all the tests succeed via `ginkgo -r -p`
-- Vet your changes via `go vet ./...`
-- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
+- Run `make` or:
+ - Install ginkgo locally via `go install ./...`
+ - Make sure all the tests succeed via `ginkgo -r -p`
+ - Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes.
-Thanks for supporting Ginkgo!
\ No newline at end of file
+Thanks for supporting Ginkgo!
diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile
new file mode 100644
index 000000000..cb099aff9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/Makefile
@@ -0,0 +1,11 @@
+# default task since it's first
+.PHONY: all
+all: vet test
+
+.PHONY: test
+test:
+ go run github.com/onsi/ginkgo/v2/ginkgo -r -p
+
+.PHONY: vet
+vet:
+ go vet ./...
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index 2d7a70ecc..a3e8237e9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
err = global.Suite.BuildTree()
exitIfErr(err)
- suitePath, err := os.Getwd()
+ suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
@@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels {
return suiteLabels
}
+func getwd() (string, error) {
+ if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+ // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+ // is shared between two different directories with the same test code.
+ return os.Getwd()
+ }
+ return "", nil
+}
+
/*
PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
See http://onsi.github.io/ginkgo/#previewing-specs for more information.
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report {
err = global.Suite.BuildTree()
exitIfErr(err)
- suitePath, err := os.Getwd()
+ suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
@@ -783,8 +792,8 @@ DeferCleanup can be passed:
For example:
BeforeEach(func() {
- DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO"))
- os.SetEnv("FOO", "BAR")
+ DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO"))
+ os.Setenv("FOO", "BAR")
})
will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
index 73aff0b7a..b2dc59be6 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -7,7 +7,7 @@ import (
"os"
"text/template"
- sprig "github.com/go-task/slim-sprig"
+ sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types"
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
index be01dec97..cf3b7cb6d 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -10,7 +10,7 @@ import (
"strings"
"text/template"
- sprig "github.com/go-task/slim-sprig"
+ sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types"
@@ -174,6 +174,7 @@ func moduleName(modRoot string) string {
if err != nil {
return ""
}
+ defer modFile.Close()
mod := make([]byte, 128)
_, err = modFile.Read(mod)
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
new file mode 100644
index 000000000..3c5079ff4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ MergeCoverProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+ if len(profiles) == 0 {
+ return nil
+ }
+ if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+ return err
+ }
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+ if into.Mode != merge.Mode {
+ return fmt.Errorf("cannot merge profiles with different modes")
+ }
+ // Since the blocks are sorted, we can keep track of where the last block
+ // was inserted and only look at the blocks after that as targets for merge
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ var err error
+ startIndex, err = mergeProfileBlock(into, b, startIndex)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
+ i := 0
+ if sortFunc(i) != true {
+ i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+ }
+
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+ }
+
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+
+ return i + 1, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
index 26de28b57..8e16d2bb0 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -1,7 +1,6 @@
package internal
import (
- "bytes"
"fmt"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"github.com/google/pprof/profile"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/cover"
)
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
return messages, nil
}
-// loads each profile, combines them, deletes them, stores them in destination
+// loads each profile, merges them, deletes them, stores them in destination
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
- combined := &bytes.Buffer{}
- modeRegex := regexp.MustCompile(`^mode: .*\n`)
- for i, profile := range profiles {
- contents, err := os.ReadFile(profile)
+ var merged []*cover.Profile
+ for _, file := range profiles {
+ parsedProfiles, err := cover.ParseProfiles(file)
if err != nil {
- return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
+ return err
}
- os.Remove(profile)
-
- // remove the cover mode line from every file
- // except the first one
- if i > 0 {
- contents = modeRegex.ReplaceAll(contents, []byte{})
- }
-
- _, err = combined.Write(contents)
-
- // Add a newline to the end of every file if missing.
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
- _, err = combined.Write([]byte("\n"))
- }
-
- if err != nil {
- return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
+ os.Remove(file)
+ for _, p := range parsedProfiles {
+ merged = AddCoverProfile(merged, p)
}
}
-
- err := os.WriteFile(destination, combined.Bytes(), 0666)
+ dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ err = DumpCoverProfiles(merged, dst)
if err != nil {
- return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
+ return err
}
return nil
}
@@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error {
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
}
prof, err := profile.Parse(proFile)
+ _ = proFile.Close()
if err != nil {
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
}
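The merge rules implemented in `mergeProfileBlock` above reduce to OR-ing hit flags for the `set` coverage mode and summing counts for `count`/`atomic`; a tiny illustrative sketch of just that rule (not the vendored internal API):

	// mergeCount mirrors the covermode switch in mergeProfileBlock: "set"
	// ORs the hit flags, "count" and "atomic" add the counts.
	func mergeCount(mode string, a, b int) int {
		switch mode {
		case "set":
			return a | b
		case "count", "atomic":
			return a + b
		}
		return a // unknown modes are rejected upstream with an error
	}

	// mergeCount("set", 1, 0)   == 1
	// mergeCount("count", 2, 3) == 5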
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
index 17d052bdc..0e6ae1f29 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"regexp"
+ "strings"
"time"
)
@@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
continue
}
+ if isHiddenFile(info) {
+ continue
+ }
+
if goTestRegExp.MatchString(info.Name()) {
testHash += p.hashForFileInfo(info)
if info.ModTime().After(testModifiedTime) {
@@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
return
}
+func isHiddenFile(info os.FileInfo) bool {
+ return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
+}
+
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 16f0dc227..6a15f19ae 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -5,9 +5,8 @@ import (
"fmt"
"reflect"
"sort"
- "time"
-
"sync"
+ "time"
"github.com/onsi/ginkgo/v2/types"
)
@@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0)
var _global_id_mutex = &sync.Mutex{}
func UniqueNodeID() uint {
- //There's a reace in the internal integration tests if we don't make
- //accessing _global_node_id_counter safe across goroutines.
+ // There's a race in the internal integration tests if we don't make
+ // accessing _global_node_id_counter safe across goroutines.
_global_id_mutex.Lock()
defer _global_id_mutex.Unlock()
_global_node_id_counter += 1
@@ -44,8 +43,8 @@ type Node struct {
SynchronizedAfterSuiteProc1Body func(SpecContext)
SynchronizedAfterSuiteProc1BodyHasContext bool
- ReportEachBody func(types.SpecReport)
- ReportSuiteBody func(types.Report)
+ ReportEachBody func(SpecContext, types.SpecReport)
+ ReportSuiteBody func(SpecContext, types.Report)
MarkedFocus bool
MarkedPending bool
@@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
args = unrollInterfaceSlice(args)
remainingArgs := []interface{}{}
- //First get the CodeLocation up-to-date
+ // First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
case Offset:
@@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
trackedFunctionError := false
args = remainingArgs
remainingArgs = []interface{}{}
- //now process the rest of the args
+ // now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(float64(0)):
- break //ignore deprecated timeouts
+ break // ignore deprecated timeouts
case t == reflect.TypeOf(Focus):
node.MarkedFocus = bool(arg.(focusType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
@@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
node.Body = func(SpecContext) { body() }
} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody == nil {
- node.ReportEachBody = arg.(func(types.SpecReport))
+ if fn, ok := arg.(func(types.SpecReport)); ok {
+ node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+ } else {
+ node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+ node.HasContext = true
+ }
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
if node.ReportSuiteBody == nil {
- node.ReportSuiteBody = arg.(func(types.Report))
+ if fn, ok := arg.(func(types.Report)); ok {
+ node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+ } else {
+ node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+ node.HasContext = true
+ }
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
- //validations
+ // validations
if node.MarkedPending && node.MarkedFocus {
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
index 2b4db48af..a3c9e6bf1 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s
newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
}
- if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending {
+ if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() {
suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
suite.report.SuiteSucceeded = false
}
+
+ if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set")
+ suite.report.SuiteSucceeded = false
+ }
}
if ranBeforeSuite {
@@ -594,8 +599,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
report := suite.currentSpecReport
- nodes[i].Body = func(SpecContext) {
- nodes[i].ReportEachBody(report)
+ nodes[i].Body = func(ctx SpecContext) {
+ nodes[i].ReportEachBody(ctx, report)
}
state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
@@ -762,7 +767,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
report = report.Add(aggregatedReport)
}
- node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+ node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
suite.currentSpecReport.EndTime = time.Now()
@@ -840,7 +845,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
timeoutInPlay = "node"
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
- //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
@@ -918,9 +923,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
if outcomeFromRun != types.SpecStatePassed {
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
- Failure: failure, //we make a copy - this will include all the configuration set up above...
+ Failure: failure, // we make a copy - this will include all the configuration set up above...
}
- //...and then we update the failure with the details from failureFromRun
+ // ...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
@@ -959,7 +964,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
- //and now we wait for the grace period
+ // and now we wait for the grace period
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
index 56b7be758..480730486 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
}
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+ r.emitBlock("\n")
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::group::%s", sectionName))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+ }
+ fn()
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::endgroup::"))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+ }
+
+}
+
func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity()
inParallel := report.RunningInParallel
+ // should we completely omit this spec?
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
+ return
+ }
+
header := r.specDenoter
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
header = fmt.Sprintf("[%s]", report.LeafNodeType)
@@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
}
}
- // If we have no content to show, jsut emit the header and return
+ // If we have no content to show, just emit the header and return
if !reportHasContent {
r.emit(r.f(highlightColor + header + "{{/}}"))
+ if r.conf.ForceNewlines {
+ r.emit("\n")
+ }
return
}
@@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
//Emit Stdout/Stderr Output
if showSeparateStdSection {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}"))
- r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
- r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
+ r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ })
}
if showSeparateVisibilityAlwaysReportsSection {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}"))
- for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
- r.emitReportEntry(1, entry)
- }
- r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
+ r.wrapTextBlock("Report Entries", func() {
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ })
}
if showTimeline {
- r.emitBlock("\n")
- r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}"))
- r.emitTimeline(1, report, timeline)
- r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
+ r.wrapTextBlock("Timeline", func() {
+ r.emitTimeline(1, report, timeline)
+ })
}
// Emit Failure Message
@@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
highlightColor := r.highlightColorForState(state)
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
- r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ if r.conf.GithubOutput {
+ level := "error"
+ if state.Is(types.SpecStateSkipped) {
+ level = "notice"
+ }
+ r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ } else {
+ r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
if failure.ForwardedPanic != "" {
r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
index 43244a9bd..562e0f62b 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
@@ -324,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
continue
}
err = xml.NewDecoder(f).Decode(&report)
+ _ = f.Close()
if err != nil {
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
continue
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
index f33786a2d..aa1a35176 100644
--- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) {
/*
ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
-receives a SpecReport. They are called before the spec starts.
+receives a SpecReport or both SpecContext and SpecReport for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+ ReportBeforeEach(func(report SpecReport) { /* process report */ })
+ ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+func ReportBeforeEach(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
@@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
}
/*
-ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
-receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and SpecReport for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+ ReportAfterEach(func(report SpecReport) { /* process report */ })
+ ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+func ReportAfterEach(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
@@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
}
/*
-ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportBeforeSuite(func(r Report) { /* process report */ })
+ ReportBeforeSuite(func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
@@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+func ReportBeforeSuite(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
}
/*
-ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+ReportAfterSuite nodes are run at the end of the suite. They accept a function that can either receive Report
+or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
-ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
all parallel nodes
@@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
index a3aef821b..c7de7a8be 100644
--- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -269,11 +269,15 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
internalNodeArgs = append(internalNodeArgs, entry.decorations...)
hasContext := false
- if internalBodyType.NumIn() > 0. {
+ if internalBodyType.NumIn() > 0 {
if internalBodyType.In(0).Implements(specContextType) {
hasContext = true
- } else if internalBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) {
+ } else if internalBodyType.In(0).Implements(contextType) {
hasContext = true
+ if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) {
+ // we allow you to pass in a non-nil context
+ hasContext = false
+ }
}
}
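With the revised check above, a table body whose first parameter is a plain `context.Context` only receives a Ginkgo-injected `SpecContext` when the Entry does not supply a context of its own; a minimal sketch of the pass-through case, inside a suite with Ginkgo and Gomega dot-imported and `context` imported, where `lookup` is a hypothetical helper:

	DescribeTable("lookups",
		func(ctx context.Context, key string) {
			// Because the Entry below provides its own non-nil context, Ginkgo
			// passes it through as a regular parameter instead of injecting a
			// SpecContext.
			_, err := lookup(ctx, key) // lookup is a hypothetical helper
			Expect(err).NotTo(HaveOccurred())
		},
		Entry("uses the entry's own context", context.Background(), "foo"),
	)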
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
index c88fc85a7..66463cf5e 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -25,6 +25,7 @@ type SuiteConfig struct {
SkipFiles []string
LabelFilter string
FailOnPending bool
+ FailOnEmpty bool
FailFast bool
FlakeAttempts int
MustPassRepeatedly int
@@ -89,6 +90,9 @@ type ReporterConfig struct {
VeryVerbose bool
FullTrace bool
ShowNodeEvents bool
+ GithubOutput bool
+ SilenceSkips bool
+ ForceNewlines bool
JSONReport string
JUnitReport string
@@ -264,7 +268,7 @@ var FlagSections = GinkgoFlagSections{
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
- Usage: "The seed used to randomize the spec suite."},
+ Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
@@ -274,6 +278,8 @@ var SuiteConfigFlags = GinkgoFlags{
Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
+ {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
+ Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
@@ -331,6 +337,12 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+ {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+ Usage: "If set, default reporter prints easier to manage output in Github Actions."},
+ {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
+ Usage: "If set, default reporter will not print out skipped tests."},
+ {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
+ Usage: "If set, default reporter will ensure a newline appears after each test."},
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
index 9186ae873..de69f3022 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -24,7 +24,8 @@ type GinkgoFlag struct {
DeprecatedDocLink string
DeprecatedVersion string
- ExportAs string
+ ExportAs string
+ AlwaysExport bool
}
type GinkgoFlags []GinkgoFlag
@@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error {
return nil
}
-//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
+// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
result := []string{}
for _, flag := range flags {
@@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
iface := value.Interface()
switch value.Type() {
case reflect.TypeOf(string("")):
- if iface.(string) != "" {
+ if iface.(string) != "" || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}
case reflect.TypeOf(int64(0)):
- if iface.(int64) != 0 {
+ if iface.(int64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(float64(0)):
- if iface.(float64) != 0 {
+ if iface.(float64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%f", name, iface))
}
case reflect.TypeOf(int(0)):
- if iface.(int) != 0 {
+ if iface.(int) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface))
}
case reflect.TypeOf(bool(true)):
@@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
result = append(result, fmt.Sprintf("--%s", name))
}
case reflect.TypeOf(time.Duration(0)):
- if iface.(time.Duration) != time.Duration(0) {
+ if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface))
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
index b0d3b651e..7fdc8aa23 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter {
return func(labels []string) bool { return a(labels) || b(labels) }
}
+func labelSetFor(key string, labels []string) map[string]bool {
+ key = strings.ToLower(strings.TrimSpace(key))
+ out := map[string]bool{}
+ for _, label := range labels {
+ components := strings.SplitN(label, ":", 2)
+ if len(components) < 2 {
+ continue
+ }
+ if key == strings.ToLower(strings.TrimSpace(components[0])) {
+ out[strings.ToLower(strings.TrimSpace(components[1]))] = true
+ }
+ }
+
+ return out
+}
+
+func isEmptyLabelSetAction(key string) LabelFilter {
+ return func(labels []string) bool {
+ return len(labelSetFor(key, labels)) == 0
+ }
+}
+
+func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if set[value] {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ if len(set) != len(expectedValues) {
+ return false
+ }
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ expectedSet := map[string]bool{}
+ for _, value := range expectedValues {
+ expectedSet[value] = true
+ }
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for value := range set {
+ if !expectedSet[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
type lfToken uint
const (
@@ -58,6 +135,9 @@ const (
lfTokenOr
lfTokenRegexp
lfTokenLabel
+ lfTokenSetKey
+ lfTokenSetOperation
+ lfTokenSetArgument
lfTokenEOF
)
@@ -71,6 +151,8 @@ func (l lfToken) Precedence() int {
return 2
case lfTokenNot:
return 3
+ case lfTokenSetOperation:
+ return 4
}
return -1
}
@@ -93,6 +175,12 @@ func (l lfToken) String() string {
return "/regexp/"
case lfTokenLabel:
return "label"
+ case lfTokenSetKey:
+ return "set_key"
+ case lfTokenSetOperation:
+ return "set_operation"
+ case lfTokenSetArgument:
+ return "set_argument"
case lfTokenEOF:
return "EOF"
}
@@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
}
return matchLabelRegexAction(re), nil
+ case lfTokenSetOperation:
+ tokenSetOperation := strings.ToLower(tn.value)
+ if tokenSetOperation == "isempty" {
+ return isEmptyLabelSetAction(tn.leftNode.value), nil
+ }
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
+ }
+
+ rawValues := strings.Split(tn.rightNode.value, ",")
+ values := make([]string, len(rawValues))
+ for i := range rawValues {
+ values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
+ if strings.ContainsAny(values[i], "&|!,()/") {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
+ } else if values[i] == "" {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
+ }
+ }
+ switch tokenSetOperation {
+ case "containsany":
+ return containsAnyLabelSetAction(tn.leftNode.value, values), nil
+ case "containsall":
+ return containsAllLabelSetAction(tn.leftNode.value, values), nil
+ case "consistsof":
+ return consistsOfLabelSetAction(tn.leftNode.value, values), nil
+ case "issubsetof":
+ return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
+ }
}
if tn.rightNode == nil {
@@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string {
return out
}
+var validSetOperations = map[string]string{
+ "containsany": "containsAny",
+ "containsall": "containsAll",
+ "consistsof": "consistsOf",
+ "issubsetof": "isSubsetOf",
+ "isempty": "isEmpty",
+}
+
func tokenize(input string) func() (*treeNode, error) {
+ lastToken := lfTokenInvalid
+ lastValue := ""
runes, i := []rune(input), 0
peekIs := func(r rune) bool {
@@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) {
}
node := &treeNode{location: i}
+ defer func() {
+ lastToken = node.token
+ lastValue = node.value
+ }()
+
+ if lastToken == lfTokenSetKey {
+ //we should get a valid set operation next
+ value, n := consumeUntil(" )")
+ if validSetOperations[strings.ToLower(value)] == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
+ }
+ i += n
+ node.token, node.value = lfTokenSetOperation, value
+ return node, nil
+ }
+ if lastToken == lfTokenSetOperation {
+ //we should get an argument next, if we aren't isempty
+ var arg = ""
+ origI := i
+ if runes[i] == '{' {
+ i += 1
+ value, n := consumeUntil("}")
+ if i+n >= len(runes) {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
+ }
+ i += n + 1
+ arg = value
+ } else {
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ arg = strings.TrimSpace(value)
+ }
+ if strings.ToLower(lastValue) == "isempty" && arg != "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
+ }
+ if arg == "" && strings.ToLower(lastValue) != "isempty" {
+ if i < len(runes) && runes[i] == '/' {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
+ } else {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
+ }
+ }
+ // note that we send an empty SetArgument token if we are isempty
+ node.token, node.value = lfTokenSetArgument, arg
+ return node, nil
+ }
+
switch runes[i] {
case '&':
if !peekIs('&') {
@@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) {
i += n + 1
node.token, node.value = lfTokenRegexp, value
default:
- value, n := consumeUntil("&|!,()/")
+ value, n := consumeUntil("&|!,()/:")
i += n
+ value = strings.TrimSpace(value)
+
+ //are we the beginning of a set operation?
+ if i < len(runes) && runes[i] == ':' {
+ if peekIs(' ') {
+ if value == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
+ }
+ i += 1
+ //we are the beginning of a set operation
+ node.token, node.value = lfTokenSetKey, value
+ return node, nil
+ }
+ additionalValue, n := consumeUntil("&|!,()/")
+ additionalValue = strings.TrimSpace(additionalValue)
+ if additionalValue == ":" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
+ }
+ i += n
+ value += additionalValue
+ }
+
+ valueToCheckForSetOperation := strings.ToLower(value)
+ for setOperation := range validSetOperations {
+ idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
+ if idx > 0 {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
+ }
+ }
+
node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
}
return node, nil
@@ -307,7 +511,7 @@ LOOP:
switch node.token {
case lfTokenEOF:
break LOOP
- case lfTokenLabel, lfTokenRegexp:
+ case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
if current.rightNode != nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
}
@@ -326,6 +530,18 @@ LOOP:
node.setLeftNode(nodeToStealFrom.rightNode)
nodeToStealFrom.setRightNode(node)
current = node
+ case lfTokenSetOperation:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
+ }
+ node.setLeftNode(current.rightNode)
+ current.setRightNode(node)
+ current = node
+ case lfTokenSetArgument:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
+ }
+ current.setRightNode(node)
case lfTokenCloseGroup:
firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
if firstUnmatchedOpenNode == nil {
@@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
if strings.ContainsAny(out, "&|!,()/") {
return "", GinkgoErrors.InvalidLabel(label, cl)
}
+ if out[0] == ':' {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if strings.Contains(out, ":") {
+ components := strings.SplitN(out, ":", 2)
+ if len(components) < 2 || components[1] == "" {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ }
return out, nil
}
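The new `key: operation {values}` grammar added above can be exercised outside the CLI as well; a minimal sketch, assuming the exported `types.ParseLabelFilter` helper and the `consistsOf` semantics defined in `consistsOfLabelSetAction`:

	package main

	import (
		"fmt"

		"github.com/onsi/ginkgo/v2/types"
	)

	func main() {
		// "os" is the set key; the filter matches specs whose os:* labels form
		// exactly the set {linux, darwin}. Labels without a key (e.g. "slow")
		// are ignored by the set operation.
		filter, err := types.ParseLabelFilter("os: consistsOf {linux, darwin}")
		if err != nil {
			panic(err)
		}
		fmt.Println(filter([]string{"os:linux", "os:darwin", "slow"})) // true
		fmt.Println(filter([]string{"os:linux"}))                      // false
	}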
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index ed9346474..acab03492 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.15.0"
+const VERSION = "2.19.0"
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 01ec5245c..62af14ad2 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,22 @@
+## 1.33.1
+
+### Fixes
+- fix confusing eventually docs [3a66379]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a]
+
+## 1.33.0
+
+### Features
+
+`Receive` now accepts `Receive(<POINTER>, MATCHER)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer.
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb]
+- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596]
+
## 1.32.0
### Maintenance
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index ffb81b1fe..9697d5134 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.32.0"
+const GOMEGA_VERSION = "1.33.1"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ
Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
- Eventually(..., "1s", "2s", ctx).Should(...)
+ Eventually(..., "10s", "2s", ctx).Should(...)
is equivalent to
- Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
+ Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index 8860d677f..7ef27dc9c 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher {
//
// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
//
-// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
//
// var myThing thing
// Eventually(thingChan).Should(Receive(&myThing))
// Expect(myThing.Sprocket).Should(Equal("foo"))
// Expect(myThing.IsValid()).Should(BeTrue())
+//
+// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
+// you can pass a pointer to a variable of the appropriate type first, and a matcher second:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
func Receive(args ...interface{}) types.GomegaMatcher {
- var arg interface{}
- if len(args) > 0 {
- arg = args[0]
- }
-
return &matchers.ReceiveMatcher{
- Arg: arg,
+ Args: args,
}
}
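A short illustration of the new pointer-plus-matcher form documented above, as it would appear inside a spec with Gomega dot-imported:

	ch := make(chan string, 1)
	ch <- "bar-123"

	var got string
	// Succeeds when a received value satisfies the matcher, and also stores
	// that value in got for further assertions.
	Eventually(ch).Should(Receive(&got, ContainSubstring("bar")))
	Expect(got).To(Equal("bar-123"))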
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 1936a2ba5..948164eaf 100644
--- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -10,7 +11,7 @@ import (
)
type ReceiveMatcher struct {
- Arg interface{}
+ Args []interface{}
receivedValue reflect.Value
channelClosed bool
}
@@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
-
- if matcher.Arg != nil {
- subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
+ var resultReference interface{}
+
+ // Valid arg formats are as follows, always with optional POINTER before
+ // optional MATCHER:
+ // - Receive()
+ // - Receive(POINTER)
+ // - Receive(MATCHER)
+ // - Receive(POINTER, MATCHER)
+ args := matcher.Args
+ if len(args) > 0 {
+ arg := args[0]
+ _, isSubMatcher := arg.(omegaMatcher)
+ if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr {
+ // Consume optional POINTER arg first, if it ain't no matcher ;)
+ resultReference = arg
+ args = args[1:]
+ }
+ }
+ if len(args) > 0 {
+ arg := args[0]
+ subMatcher, hasSubMatcher = arg.(omegaMatcher)
if !hasSubMatcher {
- argType := reflect.TypeOf(matcher.Arg)
- if argType.Kind() != reflect.Ptr {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
- }
+ // At this point we assume the dev user wanted to assign a received
+ // value, so [POINTER,]MATCHER.
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1))
}
+ // Consume optional MATCHER arg.
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ // If there are still args present, reject all.
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher")
}
winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
@@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
}
if hasSubMatcher {
- if didReceive {
- matcher.receivedValue = value
- return subMatcher.Match(matcher.receivedValue.Interface())
+ if !didReceive {
+ return false, nil
}
- return false, nil
+ matcher.receivedValue = value
+ if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match {
+ return match, err
+ }
+ // if we received a match, then fall through in order to handle an
+ // optional assignment of the received value to the specified reference.
}
if didReceive {
- if matcher.Arg != nil {
- outValue := reflect.ValueOf(matcher.Arg)
+ if resultReference != nil {
+ outValue := reflect.ValueOf(resultReference)
if value.Type().AssignableTo(outValue.Elem().Type()) {
outValue.Elem().Set(value)
@@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
outValue.Elem().Set(value.Elem())
return true, nil
} else {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1))
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1))
}
}
@@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
}
func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
@@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
}
func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go
index f482db480..f99dedf43 100644
--- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/alertmanager_types.go
@@ -240,14 +240,27 @@ type AlertmanagerSpec struct {
HostAliases []HostAlias `json:"hostAliases,omitempty"`
// Defines the web command line flags when starting Alertmanager.
Web *AlertmanagerWebSpec `json:"web,omitempty"`
- // EXPERIMENTAL: alertmanagerConfiguration specifies the configuration of Alertmanager.
+ // alertmanagerConfiguration specifies the configuration of Alertmanager.
+ //
// If defined, it takes precedence over the `configSecret` field.
- // This field may change in future releases.
+ //
+ // This is an *experimental feature*, it may change in any upcoming release
+ // in a breaking way.
+ //
+ //+optional
AlertmanagerConfiguration *AlertmanagerConfiguration `json:"alertmanagerConfiguration,omitempty"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod.
// If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+ // Enable access to Alertmanager feature flags. By default, no features are enabled.
+ // Enabling features which are disabled by default is entirely outside the
+ // scope of what the maintainers will support and by doing so, you accept
+ // that this behaviour may break at any time without notice.
+ //
+ // It requires Alertmanager >= 0.27.0.
+ // +optional
+ EnableFeatures []string `json:"enableFeatures,omitempty"`
}
// AlertmanagerConfigMatcherStrategy defines the strategy used by AlertmanagerConfig objects to match alerts.
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go
index 2fdd107f9..aa0217501 100644
--- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/podmonitor_types.go
@@ -87,6 +87,17 @@ type PodMonitorSpec struct {
// +optional
TargetLimit *uint64 `json:"targetLimit,omitempty"`
+ // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the
+ // protocols supported by Prometheus in order of preference (from most to least preferred).
+ //
+ // If unset, Prometheus uses its default value.
+ //
+ // It requires Prometheus >= v2.49.0.
+ //
+ // +listType=set
+ // +optional
+ ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"`
+
// Per-scrape limit on number of labels that will be accepted for a sample.
//
// It requires Prometheus >= v2.27.0.
@@ -120,6 +131,19 @@ type PodMonitorSpec struct {
//
// +optional
AttachMetadata *AttachMetadata `json:"attachMetadata,omitempty"`
+
+ // The scrape class to apply.
+ // +optional
+ // +kubebuilder:validation:MinLength=1
+ ScrapeClassName *string `json:"scrapeClass,omitempty"`
+
+ // When defined, bodySizeLimit specifies a job level limit on the size
+ // of uncompressed response body that will be accepted by Prometheus.
+ //
+ // It requires Prometheus >= v2.28.0.
+ //
+ // +optional
+ BodySizeLimit *ByteSize `json:"bodySizeLimit,omitempty"`
}
// PodMonitorList is a list of PodMonitors.
@@ -186,7 +210,7 @@ type PodMetricsEndpoint struct {
// TLS configuration to use when scraping the target.
//
// +optional
- TLSConfig *PodMetricsEndpointTLSConfig `json:"tlsConfig,omitempty"`
+ TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"`
// `bearerTokenSecret` specifies a key of a Secret containing the bearer
// token for scraping targets. The secret needs to be in the same namespace
@@ -245,7 +269,7 @@ type PodMetricsEndpoint struct {
// samples before ingestion.
//
// +optional
- MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
+ MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"`
// `relabelings` configures the relabeling rules to apply the target's
// metadata labels.
@@ -257,7 +281,7 @@ type PodMetricsEndpoint struct {
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
//
// +optional
- RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"`
+ RelabelConfigs []RelabelConfig `json:"relabelings,omitempty"`
// `proxyURL` configures the HTTP Proxy URL (e.g.
// "http://proxyserver:2195") to go through when scraping the target.
@@ -286,9 +310,3 @@ type PodMetricsEndpoint struct {
// +optional
FilterRunning *bool `json:"filterRunning,omitempty"`
}
-
-// PodMetricsEndpointTLSConfig specifies TLS configuration parameters.
-// +k8s:openapi-gen=true
-type PodMetricsEndpointTLSConfig struct {
- SafeTLSConfig `json:",inline"`
-}
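
Editor's note: two of the PodMonitor changes are breaking for consumers: `PodMetricsEndpointTLSConfig` is removed in favour of `SafeTLSConfig`, and the relabeling slices now hold values instead of pointers. A migration sketch for code that constructs endpoints against the v0.75.x API (field values are illustrative):

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func podEndpoint() monitoringv1.PodMetricsEndpoint {
	return monitoringv1.PodMetricsEndpoint{
		// Was *PodMetricsEndpointTLSConfig (a thin wrapper around
		// SafeTLSConfig); the endpoint now takes *SafeTLSConfig directly.
		TLSConfig: &monitoringv1.SafeTLSConfig{},
		// Was []*RelabelConfig; now a slice of values.
		MetricRelabelConfigs: []monitoringv1.RelabelConfig{
			{Action: "labeldrop", Regex: "tmp_.*"},
		},
	}
}
```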
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go
index 59b85ae6a..4e8427c6c 100644
--- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/probe_types.go
@@ -64,7 +64,7 @@ type ProbeSpec struct {
// If not specified, the Prometheus global scrape timeout is used.
ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"`
// TLS configuration to use when scraping the endpoint.
- TLSConfig *ProbeTLSConfig `json:"tlsConfig,omitempty"`
+ TLSConfig *SafeTLSConfig `json:"tlsConfig,omitempty"`
// Secret to mount to read bearer token for scraping targets. The secret
// needs to be in the same namespace as the probe and accessible by
// the Prometheus Operator.
@@ -75,7 +75,7 @@ type ProbeSpec struct {
// OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer.
OAuth2 *OAuth2 `json:"oauth2,omitempty"`
// MetricRelabelConfigs to apply to samples before ingestion.
- MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"`
+ MetricRelabelConfigs []RelabelConfig `json:"metricRelabelings,omitempty"`
// Authorization section for this endpoint
Authorization *SafeAuthorization `json:"authorization,omitempty"`
// SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
@@ -84,6 +84,16 @@ type ProbeSpec struct {
// TargetLimit defines a limit on the number of scraped targets that will be accepted.
// +optional
TargetLimit *uint64 `json:"targetLimit,omitempty"`
+ // `scrapeProtocols` defines the protocols to negotiate during a scrape. It tells clients the
+ // protocols supported by Prometheus in order of preference (from most to least preferred).
+ //
+ // If unset, Prometheus uses its default value.
+ //
+ // It requires Prometheus >= v2.49.0.
+ //
+ // +listType=set
+ // +optional
+ ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"`
// Per-scrape limit on number of labels that will be accepted for a sample.
// Only valid in Prometheus versions 2.27.0 and newer.
// +optional
@@ -103,6 +113,11 @@ type ProbeSpec struct {
//
// +optional
KeepDroppedTargets *uint64 `json:"keepDroppedTargets,omitempty"`
+
+ // The scrape class to apply.
+ // +optional
+ // +kubebuilder:validation:MinLength=1
+ ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
// ProbeTargets defines how to discover the probed targets.
@@ -151,7 +166,7 @@ type ProbeTargetStaticConfig struct {
// RelabelConfigs to apply to the label set of the targets before it gets
// scraped.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
- RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"`
+ RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"`
}
// ProbeTargetIngress defines the set of Ingress objects considered for probing.
@@ -169,7 +184,7 @@ type ProbeTargetIngress struct {
// probed URL.
// The original scrape job's name is available via the `__tmp_prometheus_job_name` label.
// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
- RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"`
+ RelabelConfigs []RelabelConfig `json:"relabelingConfigs,omitempty"`
}
// ProberSpec contains specification parameters for the Prober used for probing.
@@ -205,9 +220,3 @@ type ProbeList struct {
func (l *ProbeList) DeepCopyObject() runtime.Object {
return l.DeepCopy()
}
-
-// ProbeTLSConfig specifies TLS configuration parameters for the prober.
-// +k8s:openapi-gen=true
-type ProbeTLSConfig struct {
- SafeTLSConfig `json:",inline"`
-}
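
Editor's note: both PodMonitor and Probe gain `scrapeProtocols` and `scrapeClass`. A sketch of setting the protocol negotiation order on a PodMonitor; explicit string conversions are used because named `ScrapeProtocol` constants are not shown in this diff:

```go
package main

import (
	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podMonitorWithProtocols prefers the protobuf scrape format and falls back
// to OpenMetrics 1.0.0. Requires Prometheus >= v2.49.0.
func podMonitorWithProtocols() *monitoringv1.PodMonitor {
	return &monitoringv1.PodMonitor{
		ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "default"},
		Spec: monitoringv1.PodMonitorSpec{
			ScrapeProtocols: []monitoringv1.ScrapeProtocol{
				monitoringv1.ScrapeProtocol("PrometheusProto"),
				monitoringv1.ScrapeProtocol("OpenMetricsText1.0.0"),
			},
		},
	}
}
```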
diff --git a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go
index 4b66d0e19..e22d785fb 100644
--- a/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go
+++ b/vendor/github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1/prometheus_types.go
@@ -32,6 +32,15 @@ const (
PrometheusKindKey = "prometheus"
)
+// ScrapeProtocol represents a protocol used by Prometheus for scraping metrics.
+// Supported values are:
+// * `OpenMetricsText0.0.1`
+// * `OpenMetricsText1.0.0`
+// * `PrometheusProto`
+// * `PrometheusText0.0.4`
+// +kubebuilder:validation:Enum=PrometheusProto;OpenMetricsText0.0.1;OpenMetricsText1.0.0;PrometheusText0.0.4
+type ScrapeProtocol string
+
// PrometheusInterface is used by Prometheus and PrometheusAgent to share common methods, e.g. config generation.
// +k8s:deepcopy-gen=false
type PrometheusInterface interface {
@@ -79,6 +88,9 @@ type TopologySpreadConstraint struct {
AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"`
}
+// +kubebuilder:validation:MinLength:=1
+type EnableFeature string
+
// CommonPrometheusFields are the options available to both the Prometheus server and agent.
// +k8s:deepcopy-gen=true
type CommonPrometheusFields struct {
@@ -108,13 +120,12 @@ type CommonPrometheusFields struct {
// `spec.additionalScrapeConfigs` instead.
ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
// Namespaces to match for ServiceMonitors discovery. An empty label selector
- // matches all namespaces. A null label selector matches the current
+ // matches all namespaces. A null label selector (default value) matches the current
// namespace only.
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"`
- // *Experimental* PodMonitors to be selected for target discovery. An empty
- // label selector matches all objects. A null label selector matches no
- // objects.
+ // PodMonitors to be selected for target discovery. An empty label selector
+ // matches all objects. A null label selector matches no objects.
//
// If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector`
// and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged.
@@ -126,13 +137,12 @@ type CommonPrometheusFields struct {
// `spec.additionalScrapeConfigs` instead.
PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"`
// Namespaces to match for PodMonitors discovery. An empty label selector
- // matches all namespaces. A null label selector matches the current
+ // matches all namespaces. A null label selector (default value) matches the current
// namespace only.
PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"`
- // *Experimental* Probes to be selected for target discovery. An empty
- // label selector matches all objects. A null label selector matches no
- // objects.
+ // Probes to be selected for target discovery. An empty label selector
+ // matches all objects. A null label selector matches no objects.
//
// If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector`
// and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged.
@@ -143,14 +153,13 @@ type CommonPrometheusFields struct {
// of the custom resource definition. It is recommended to use
// `spec.additionalScrapeConfigs` instead.
ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"`
- // *Experimental* Namespaces to match for Probe discovery. An empty label
+ // Namespaces to match for Probe discovery. An empty label
// selector matches all namespaces. A null label selector matches the
// current namespace only.
ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"`
- // *Experimental* ScrapeConfigs to be selected for target discovery. An
- // empty label selector matches all objects. A null label selector matches
- // no objects.
+ // ScrapeConfigs to be selected for target discovery. An empty label
+ // selector matches all objects. A null label selector matches no objects.
//
// If `spec.serviceMonitorSelector`, `spec.podMonitorSelector`, `spec.probeSelector`
// and `spec.scrapeConfigSelector` are null, the Prometheus configuration is unmanaged.
@@ -160,10 +169,18 @@ type CommonPrometheusFields struct {
// This behavior is *deprecated* and will be removed in the next major version
// of the custom resource definition. It is recommended to use
// `spec.additionalScrapeConfigs` instead.
+ //
+ // Note that the ScrapeConfig custom resource definition is currently at Alpha level.
+ //
+ // +optional
ScrapeConfigSelector *metav1.LabelSelector `json:"scrapeConfigSelector,omitempty"`
// Namespaces to match for ScrapeConfig discovery. An empty label selector
// matches all namespaces. A null label selector matches the current
- // current namespace only.
+ // namespace only.
+ //
+ // Note that the ScrapeConfig custom resource definition is currently at Alpha level.
+ //
+ // +optional
ScrapeConfigNamespaceSelector *metav1.LabelSelector `json:"scrapeConfigNamespaceSelector,omitempty"`
// Version of Prometheus being deployed. The operator uses this information
@@ -206,7 +223,7 @@ type CommonPrometheusFields struct {
// Default: 1
// +optional
Replicas *int32 `json:"replicas,omitempty"`
- // EXPERIMENTAL: Number of shards to distribute targets onto. `spec.replicas`
+ // Number of shards to distribute targets onto. `spec.replicas`
// multiplied by `spec.shards` is the total number of Pods created.
//
// Note that scaling down shards will not reshard data onto remaining
@@ -252,6 +269,17 @@ type CommonPrometheusFields struct {
// Number of seconds to wait until a scrape request times out.
ScrapeTimeout Duration `json:"scrapeTimeout,omitempty"`
+ // The protocols to negotiate during a scrape. It tells clients the
+ // protocols supported by Prometheus in order of preference (from most to least preferred).
+ //
+ // If unset, Prometheus uses its default value.
+ //
+ // It requires Prometheus >= v2.49.0.
+ //
+ // +listType=set
+ // +optional
+ ScrapeProtocols []ScrapeProtocol `json:"scrapeProtocols,omitempty"`
+
// The labels to add to any time series or alerts when communicating with
// external systems (federation, remote storage, Alertmanager).
// Labels defined by `spec.replicaExternalLabelName` and
@@ -277,7 +305,10 @@ type CommonPrometheusFields struct {
// that this behaviour may break at any time without notice.
//
// For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/
- EnableFeatures []string `json:"enableFeatures,omitempty"`
+ //
+ // +listType:=set
+ // +optional
+ EnableFeatures []EnableFeature `json:"enableFeatures,omitempty"`
// The external URL under which the Prometheus service is externally
// available. This is necessary to generate correct URLs (for instance if
@@ -325,6 +356,14 @@ type CommonPrometheusFields struct {
// Prometheus Pods.
ServiceAccountName string `json:"serviceAccountName,omitempty"`
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod.
+ // If the field isn't set, the operator mounts the service account token by default.
+ //
+ // **Warning:** be aware that by default, Prometheus requires the service account token for Kubernetes service discovery.
+ // It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
+ // +optional
+ AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+
// Secrets is a list of Secrets in the same namespace as the Prometheus
// object, which shall be mounted into the Prometheus Pods.
// Each Secret is added to the StatefulSet definition as a volume named `secret-`.
@@ -436,10 +475,11 @@ type CommonPrometheusFields struct {
// `spec.bearerTokenSecret` field.
ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"`
- // When true, Prometheus resolves label conflicts by renaming the labels in
- // the scraped data to "exported_